LU-10467 ptlrpc: refactor waiting in ptlrpc_set_wait()
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 /** Implementation of client-side PortalRPC interfaces */
34
35 #define DEBUG_SUBSYSTEM S_RPC
36
37 #include <linux/delay.h>
38 #include <linux/random.h>
39
40 #include <lnet/lib-lnet.h>
41 #include <obd_support.h>
42 #include <obd_class.h>
43 #include <lustre_lib.h>
44 #include <lustre_ha.h>
45 #include <lustre_import.h>
46 #include <lustre_req_layout.h>
47
48 #include "ptlrpc_internal.h"
49
50 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
51                                       struct page *page, int pageoffset,
52                                       int len)
53 {
54         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
55 }
56
57 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
58                                         struct page *page, int pageoffset,
59                                         int len)
60 {
61         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
62 }
63
64 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
65 {
66         int i;
67
68         for (i = 0; i < desc->bd_iov_count ; i++)
69                 put_page(BD_GET_KIOV(desc, i).kiov_page);
70 }
71
72 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
73                                        void *frag, int len)
74 {
75         unsigned int offset = (uintptr_t)frag & ~PAGE_MASK;
76
77         ENTRY;
78         while (len > 0) {
79                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
80                                      len);
81                 uintptr_t vaddr = (uintptr_t) frag;
82
83                 ptlrpc_prep_bulk_page_nopin(desc,
84                                             lnet_kvaddr_to_page(vaddr),
85                                             offset, page_len);
86                 offset = 0;
87                 len -= page_len;
88                 frag += page_len;
89         }
90
91         RETURN(desc->bd_nob);
92 }
93
94 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
95         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
96         .release_frags  = ptlrpc_release_bulk_page_pin,
97 };
98 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
99
100 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
101         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
102         .release_frags  = ptlrpc_release_bulk_noop,
103         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
104 };
105 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
106
107 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
108         .add_iov_frag = ptlrpc_prep_bulk_frag,
109 };
110 EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
111
112 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
113 static int ptlrpcd_check_work(struct ptlrpc_request *req);
114 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
115
116 /**
117  * Initialize the passed-in client structure \a cl.
118  */
119 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
120                         struct ptlrpc_client *cl)
121 {
122         cl->cli_request_portal = req_portal;
123         cl->cli_reply_portal   = rep_portal;
124         cl->cli_name           = name;
125 }
126 EXPORT_SYMBOL(ptlrpc_init_client);
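/*
 * Illustrative sketch, not part of the original file: how an OBD client
 * might wire up its ptlrpc_client during setup.  The portal constants and
 * the obd_ldlm_client field reflect typical OSC-style usage and are
 * assumptions here.
 */
static void example_init_ost_client(struct obd_device *obd)
{
	/* requests go out on the OST request portal, replies come back on
	 * the OSC reply portal */
	ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
			   "ost_client", &obd->obd_ldlm_client);
}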
127
128 /**
129  * Return PortalRPC connection for remote UUID \a uuid
130  */
131 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
132                                                     lnet_nid_t nid4refnet)
133 {
134         struct ptlrpc_connection *c;
135         lnet_nid_t self;
136         struct lnet_process_id peer;
137         int err;
138
139         /*
140          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
141          * before accessing its values.
142          */
143         /* coverity[uninit_use_in_call] */
144         peer.nid = nid4refnet;
145         err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
146         if (err != 0) {
147                 CNETERR("cannot find peer %s!\n", uuid->uuid);
148                 return NULL;
149         }
150
151         c = ptlrpc_connection_get(peer, self, uuid);
152         if (c) {
153                 memcpy(c->c_remote_uuid.uuid,
154                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
155         }
156
157         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
158
159         return c;
160 }
161
162 /**
163  * Allocate and initialize new bulk descriptor on the sender.
164  * Returns pointer to the descriptor or NULL on error.
165  */
166 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
167                                          unsigned int max_brw,
168                                          enum ptlrpc_bulk_op_type type,
169                                          unsigned int portal,
170                                          const struct ptlrpc_bulk_frag_ops *ops)
171 {
172         struct ptlrpc_bulk_desc *desc;
173         int i;
174
175         /* ensure that only one of KIOV or IOVEC is set but not both */
176         LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
177                  ops->add_kiov_frag != NULL) ||
178                 (ptlrpc_is_bulk_desc_kvec(type) &&
179                  ops->add_iov_frag != NULL));
180
181         OBD_ALLOC_PTR(desc);
182         if (!desc)
183                 return NULL;
184         if (type & PTLRPC_BULK_BUF_KIOV) {
185                 OBD_ALLOC_LARGE(GET_KIOV(desc),
186                                 nfrags * sizeof(*GET_KIOV(desc)));
187                 if (!GET_KIOV(desc))
188                         goto out;
189         } else {
190                 OBD_ALLOC_LARGE(GET_KVEC(desc),
191                                 nfrags * sizeof(*GET_KVEC(desc)));
192                 if (!GET_KVEC(desc))
193                         goto out;
194         }
195
196         spin_lock_init(&desc->bd_lock);
197         init_waitqueue_head(&desc->bd_waitq);
198         desc->bd_max_iov = nfrags;
199         desc->bd_iov_count = 0;
200         desc->bd_portal = portal;
201         desc->bd_type = type;
202         desc->bd_md_count = 0;
203         desc->bd_frag_ops = ops;
204         LASSERT(max_brw > 0);
205         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
206         /*
207          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
208          * node. Negotiated ocd_brw_size will always be <= this number.
209          */
210         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
211                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
212
213         return desc;
214 out:
215         OBD_FREE_PTR(desc);
216         return NULL;
217 }
218
219 /**
220  * Prepare bulk descriptor for specified outgoing request \a req that
221  * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
222  * the bulk is to be sent. Used on the client side.
223  * Returns pointer to a newly allocated and initialized bulk descriptor or NULL on
224  * error.
225  */
226 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
227                                               unsigned int nfrags,
228                                               unsigned int max_brw,
229                                               unsigned int type,
230                                               unsigned int portal,
231                                               const struct ptlrpc_bulk_frag_ops
232                                                 *ops)
233 {
234         struct obd_import *imp = req->rq_import;
235         struct ptlrpc_bulk_desc *desc;
236
237         ENTRY;
238         LASSERT(ptlrpc_is_bulk_op_passive(type));
239
240         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
241         if (!desc)
242                 RETURN(NULL);
243
244         desc->bd_import = class_import_get(imp);
245         desc->bd_req = req;
246
247         desc->bd_cbid.cbid_fn  = client_bulk_callback;
248         desc->bd_cbid.cbid_arg = desc;
249
250         /* This makes req own desc; it is freed when req itself is freed */
251         req->rq_bulk = desc;
252
253         return desc;
254 }
255 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
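/*
 * Illustrative sketch, not part of the original file: preparing a read-side
 * bulk descriptor (client is the sink of a server PUT) for an already packed
 * request, then attaching full pages through the frag ops.  The "pages" and
 * "count" parameters are placeholders.
 */
static struct ptlrpc_bulk_desc *
example_prep_read_bulk(struct ptlrpc_request *req, struct page **pages,
		       int count)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, count, 1,
				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (desc == NULL)
		return NULL;

	for (i = 0; i < count; i++)
		/* pin_ops->add_kiov_frag is ptlrpc_prep_bulk_page_pin above */
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);

	return desc;
}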
256
257 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
258                              struct page *page, int pageoffset, int len,
259                              int pin)
260 {
261         lnet_kiov_t *kiov;
262
263         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
264         LASSERT(page != NULL);
265         LASSERT(pageoffset >= 0);
266         LASSERT(len > 0);
267         LASSERT(pageoffset + len <= PAGE_SIZE);
268         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
269
270         kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
271
272         desc->bd_nob += len;
273
274         if (pin)
275                 get_page(page);
276
277         kiov->kiov_page = page;
278         kiov->kiov_offset = pageoffset;
279         kiov->kiov_len = len;
280
281         desc->bd_iov_count++;
282 }
283 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
284
285 int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
286                           void *frag, int len)
287 {
288         struct kvec *iovec;
289
290         ENTRY;
291
292         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
293         LASSERT(frag != NULL);
294         LASSERT(len > 0);
295         LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
296
297         iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
298
299         desc->bd_nob += len;
300
301         iovec->iov_base = frag;
302         iovec->iov_len = len;
303
304         desc->bd_iov_count++;
305
306         RETURN(desc->bd_nob);
307 }
308 EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
309
310 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
311 {
312         ENTRY;
313
314         LASSERT(desc != NULL);
315         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
316         LASSERT(desc->bd_md_count == 0);         /* network hands off */
317         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
318         LASSERT(desc->bd_frag_ops != NULL);
319
320         if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
321                 sptlrpc_enc_pool_put_pages(desc);
322
323         if (desc->bd_export)
324                 class_export_put(desc->bd_export);
325         else
326                 class_import_put(desc->bd_import);
327
328         if (desc->bd_frag_ops->release_frags != NULL)
329                 desc->bd_frag_ops->release_frags(desc);
330
331         if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
332                 OBD_FREE_LARGE(GET_KIOV(desc),
333                                desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
334         else
335                 OBD_FREE_LARGE(GET_KVEC(desc),
336                                desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
337         OBD_FREE_PTR(desc);
338         EXIT;
339 }
340 EXPORT_SYMBOL(ptlrpc_free_bulk);
341
342 /**
343  * Set server timelimit for this req, i.e. how long we are willing to wait
344  * for a reply before timing out this request.
345  */
346 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
347 {
348         __u32 serv_est;
349         int idx;
350         struct imp_at *at;
351
352         LASSERT(req->rq_import);
353
354         if (AT_OFF) {
355                 /* non-AT settings */
356                 /**
357                  * \a imp_server_timeout means this is a reverse import and
358                  * we send (currently only) ASTs to the client, so we cannot
359                  * afford to wait too long for the reply; otherwise the other
360                  * client (on whose behalf we are sending this request) would
361                  * time out waiting for us
362                  */
363                 req->rq_timeout = req->rq_import->imp_server_timeout ?
364                                   obd_timeout / 2 : obd_timeout;
365         } else {
366                 at = &req->rq_import->imp_at;
367                 idx = import_at_get_index(req->rq_import,
368                                           req->rq_request_portal);
369                 serv_est = at_get(&at->iat_service_estimate[idx]);
370                 req->rq_timeout = at_est2timeout(serv_est);
371         }
372         /*
373          * We could get even fancier here, using history to predict increased
374          * loading...
375          */
376
377         /*
378          * Let the server know what this RPC timeout is by putting it in the
379          * reqmsg
380          */
381         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
382 }
383 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
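/*
 * Worked example (not part of the original file), using the description of
 * at_est2timeout() given further down in this file ("AT service time x 125%
 * + 5s"): with adaptive timeouts enabled and a current service estimate of
 * 20 seconds for the target portal, rq_timeout becomes roughly
 * 20 * 1.25 + 5 = 30 seconds, and that value is also advertised to the
 * server via lustre_msg_set_timeout().
 */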
384
385 /* Adjust max service estimate based on server value */
386 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
387                                   unsigned int serv_est)
388 {
389         int idx;
390         unsigned int oldse;
391         struct imp_at *at;
392
393         LASSERT(req->rq_import);
394         at = &req->rq_import->imp_at;
395
396         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
397         /*
398          * max service estimates are tracked on the server side,
399          * so just keep minimal history here
400          */
401         oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
402         if (oldse != 0)
403                 CDEBUG(D_ADAPTTO,
404                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
405                        req->rq_import->imp_obd->obd_name,
406                        req->rq_request_portal,
407                        oldse, at_get(&at->iat_service_estimate[idx]));
408 }
409
410 /* Expected network latency per remote node (secs) */
411 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
412 {
413         return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
414 }
415
416 /* Adjust expected network latency */
417 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
418                                unsigned int service_time)
419 {
420         unsigned int nl, oldnl;
421         struct imp_at *at;
422         time64_t now = ktime_get_real_seconds();
423
424         LASSERT(req->rq_import);
425
426         if (service_time > now - req->rq_sent + 3) {
427                 /*
428                  * b=16408. However, this can also happen if the early reply
429                  * is lost and the client RPC expires and is resent; the early
430                  * reply or the reply of the original RPC may still fit in the
431                  * reply buffer of the resent RPC. The client is now measuring
432                  * time from the resend time, but the server sent back the
433                  * service time of the original RPC.
434                  */
435                 CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
436                        D_ADAPTTO : D_WARNING,
437                        "Reported service time %u > total measured time %lld\n",
438                        service_time, now - req->rq_sent);
439                 return;
440         }
441
442         /* Network latency is total time less server processing time */
443         nl = max_t(int, now - req->rq_sent -
444                         service_time, 0) + 1; /* st rounding */
445         at = &req->rq_import->imp_at;
446
447         oldnl = at_measured(&at->iat_net_latency, nl);
448         if (oldnl != 0)
449                 CDEBUG(D_ADAPTTO,
450                        "The network latency for %s (nid %s) has changed from %d to %d\n",
451                        req->rq_import->imp_obd->obd_name,
452                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
453                        oldnl, at_get(&at->iat_net_latency));
454 }
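/*
 * Worked example for the latency calculation above (not part of the original
 * file): if the request was sent at t = 100s, the reply is processed at
 * now = 107s and the server reported service_time = 5s, then
 *
 *	nl = max(107 - 100 - 5, 0) + 1 = 3
 *
 * i.e. about two seconds of network latency plus one second to cover the
 * server's whole-second rounding of its service time.
 */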
455
456 static int unpack_reply(struct ptlrpc_request *req)
457 {
458         int rc;
459
460         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
461                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
462                 if (rc) {
463                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
464                                   rc);
465                         return -EPROTO;
466                 }
467         }
468
469         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
470         if (rc) {
471                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
472                           rc);
473                 return -EPROTO;
474         }
475         return 0;
476 }
477
478 /**
479  * Handle an early reply message, called with the rq_lock held.
480  * If anything goes wrong just ignore it - same as if it never happened
481  */
482 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
483 __must_hold(&req->rq_lock)
484 {
485         struct ptlrpc_request *early_req;
486         time64_t olddl;
487         int rc;
488
489         ENTRY;
490         req->rq_early = 0;
491         spin_unlock(&req->rq_lock);
492
493         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
494         if (rc) {
495                 spin_lock(&req->rq_lock);
496                 RETURN(rc);
497         }
498
499         rc = unpack_reply(early_req);
500         if (rc != 0) {
501                 sptlrpc_cli_finish_early_reply(early_req);
502                 spin_lock(&req->rq_lock);
503                 RETURN(rc);
504         }
505
506         /*
507          * Use the new timeout value only to adjust the local value for this
508          * request; don't include it in at_history. It is not yet clear why
509          * the service time increased and whether it should be counted or
510          * skipped, e.g. it could be a recovery case or some error on the
511          * server; the real reply will add all the new data if it is worth adding.
512          */
513         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
514         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
515
516         /* Network latency can be adjusted, it is pure network delays */
517         ptlrpc_at_adj_net_latency(req,
518                                   lustre_msg_get_service_time(early_req->rq_repmsg));
519
520         sptlrpc_cli_finish_early_reply(early_req);
521
522         spin_lock(&req->rq_lock);
523         olddl = req->rq_deadline;
524         /*
525          * The server assumes it now has rq_timeout from when the request
526          * arrived, so the client should give it at least that long.
527          * Since we don't know the arrival time, we'll use the original
528          * sent time
529          */
530         req->rq_deadline = req->rq_sent + req->rq_timeout +
531                            ptlrpc_at_get_net_latency(req);
532
533         /* The below message is checked in replay-single.sh test_65{a,b} */
534         /* The below message is checked in sanity-{gss,krb5} test_8 */
535         DEBUG_REQ(D_ADAPTTO, req,
536                   "Early reply #%d, new deadline in %llds (%llds)",
537                   req->rq_early_count,
538                   req->rq_deadline - ktime_get_real_seconds(),
539                   req->rq_deadline - olddl);
540
541         RETURN(rc);
542 }
543
544 static struct kmem_cache *request_cache;
545
546 int ptlrpc_request_cache_init(void)
547 {
548         request_cache = kmem_cache_create("ptlrpc_cache",
549                                           sizeof(struct ptlrpc_request),
550                                           0, SLAB_HWCACHE_ALIGN, NULL);
551         return request_cache ? 0 : -ENOMEM;
552 }
553
554 void ptlrpc_request_cache_fini(void)
555 {
556         kmem_cache_destroy(request_cache);
557 }
558
559 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
560 {
561         struct ptlrpc_request *req;
562
563         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
564         return req;
565 }
566
567 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
568 {
569         OBD_SLAB_FREE_PTR(req, request_cache);
570 }
571
572 /**
573  * Wind down request pool \a pool.
574  * Frees all requests from the pool too
575  */
576 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
577 {
578         struct list_head *l, *tmp;
579         struct ptlrpc_request *req;
580
581         LASSERT(pool != NULL);
582
583         spin_lock(&pool->prp_lock);
584         list_for_each_safe(l, tmp, &pool->prp_req_list) {
585                 req = list_entry(l, struct ptlrpc_request, rq_list);
586                 list_del(&req->rq_list);
587                 LASSERT(req->rq_reqbuf);
588                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
589                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
590                 ptlrpc_request_cache_free(req);
591         }
592         spin_unlock(&pool->prp_lock);
593         OBD_FREE(pool, sizeof(*pool));
594 }
595 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
596
597 /**
598  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
599  */
600 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
601 {
602         int i;
603         int size = 1;
604
605         while (size < pool->prp_rq_size)
606                 size <<= 1;
607
608         LASSERTF(list_empty(&pool->prp_req_list) ||
609                  size == pool->prp_rq_size,
610                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
611                  pool->prp_rq_size, size);
612
613         pool->prp_rq_size = size;
614         for (i = 0; i < num_rq; i++) {
615                 struct ptlrpc_request *req;
616                 struct lustre_msg *msg;
617
618                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
619                 if (!req)
620                         return i;
621                 OBD_ALLOC_LARGE(msg, size);
622                 if (!msg) {
623                         ptlrpc_request_cache_free(req);
624                         return i;
625                 }
626                 req->rq_reqbuf = msg;
627                 req->rq_reqbuf_len = size;
628                 req->rq_pool = pool;
629                 spin_lock(&pool->prp_lock);
630                 list_add_tail(&req->rq_list, &pool->prp_req_list);
631                 spin_unlock(&pool->prp_lock);
632         }
633         return num_rq;
634 }
635 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
636
637 /**
638  * Create and initialize new request pool with given attributes:
639  * \a num_rq - initial number of requests to create for the pool
640  * \a msgsize - maximum message size possible for requests in this pool
641  * \a populate_pool - function to be called when more requests need to be added
642  *                    to the pool
643  * Returns pointer to newly created pool or NULL on error.
644  */
645 struct ptlrpc_request_pool *
646 ptlrpc_init_rq_pool(int num_rq, int msgsize,
647                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
648 {
649         struct ptlrpc_request_pool *pool;
650
651         OBD_ALLOC_PTR(pool);
652         if (!pool)
653                 return NULL;
654
655         /*
656          * Request the next power of two for the allocation, because the
657          * kernel would internally do exactly this
658          */
659         spin_lock_init(&pool->prp_lock);
660         INIT_LIST_HEAD(&pool->prp_req_list);
661         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
662         pool->prp_populate = populate_pool;
663
664         populate_pool(pool, num_rq);
665
666         return pool;
667 }
668 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
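/*
 * Illustrative sketch, not part of the original file: creating an emergency
 * request pool at client setup time with ptlrpc_add_rqs_to_pool() as the
 * populate callback.  Starting with 0 requests and OST_IO_MAXREQSIZE as the
 * message size mirrors typical OSC usage, but both values are assumptions.
 */
static struct ptlrpc_request_pool *example_create_rq_pool(void)
{
	/* the pool is grown later, on demand, via the populate callback */
	return ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
				   ptlrpc_add_rqs_to_pool);
}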
669
670 /**
671  * Fetches one request from pool \a pool
672  */
673 static struct ptlrpc_request *
674 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
675 {
676         struct ptlrpc_request *request;
677         struct lustre_msg *reqbuf;
678
679         if (!pool)
680                 return NULL;
681
682         spin_lock(&pool->prp_lock);
683
684         /*
685          * See if we have anything in the pool and bail out if not. In the
686          * writeout path, where this matters, this is safe to do because
687          * nothing is lost in this case; when some in-flight requests
688          * complete, this code will be called again.
689          */
690         if (unlikely(list_empty(&pool->prp_req_list))) {
691                 spin_unlock(&pool->prp_lock);
692                 return NULL;
693         }
694
695         request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
696                              rq_list);
697         list_del_init(&request->rq_list);
698         spin_unlock(&pool->prp_lock);
699
700         LASSERT(request->rq_reqbuf);
701         LASSERT(request->rq_pool);
702
703         reqbuf = request->rq_reqbuf;
704         memset(request, 0, sizeof(*request));
705         request->rq_reqbuf = reqbuf;
706         request->rq_reqbuf_len = pool->prp_rq_size;
707         request->rq_pool = pool;
708
709         return request;
710 }
711
712 /**
713  * Returns freed \a request to pool.
714  */
715 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
716 {
717         struct ptlrpc_request_pool *pool = request->rq_pool;
718
719         spin_lock(&pool->prp_lock);
720         LASSERT(list_empty(&request->rq_list));
721         LASSERT(!request->rq_receiving_reply);
722         list_add_tail(&request->rq_list, &pool->prp_req_list);
723         spin_unlock(&pool->prp_lock);
724 }
725
726 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
727 {
728         struct obd_import *imp = req->rq_import;
729         struct list_head *tmp;
730         struct ptlrpc_request *iter;
731
732         assert_spin_locked(&imp->imp_lock);
733         LASSERT(list_empty(&req->rq_unreplied_list));
734
735         /* unreplied list is sorted by xid in ascending order */
736         list_for_each_prev(tmp, &imp->imp_unreplied_list) {
737                 iter = list_entry(tmp, struct ptlrpc_request,
738                                   rq_unreplied_list);
739
740                 LASSERT(req->rq_xid != iter->rq_xid);
741                 if (req->rq_xid < iter->rq_xid)
742                         continue;
743                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
744                 return;
745         }
746         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
747 }
748
749 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
750 {
751         req->rq_xid = ptlrpc_next_xid();
752         ptlrpc_add_unreplied(req);
753 }
754
755 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
756 {
757         spin_lock(&req->rq_import->imp_lock);
758         ptlrpc_assign_next_xid_nolock(req);
759         spin_unlock(&req->rq_import->imp_lock);
760 }
761
762 static atomic64_t ptlrpc_last_xid;
763
764 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
765 {
766         spin_lock(&req->rq_import->imp_lock);
767         list_del_init(&req->rq_unreplied_list);
768         ptlrpc_assign_next_xid_nolock(req);
769         spin_unlock(&req->rq_import->imp_lock);
770         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
771 }
772
773 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
774 {
775         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
776         __u32 opc;
777         __u16 tag;
778
779         opc = lustre_msg_get_opc(req->rq_reqmsg);
780         tag = obd_get_mod_rpc_slot(cli, opc);
781         lustre_msg_set_tag(req->rq_reqmsg, tag);
782         ptlrpc_reassign_next_xid(req);
783 }
784 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
785
786 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
787 {
788         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
789
790         if (tag != 0) {
791                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
792                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
793
794                 obd_put_mod_rpc_slot(cli, opc, tag);
795         }
796 }
797 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
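/*
 * Illustrative sketch, not part of the original file: a modifying RPC takes
 * a "mod rpc slot" (which tags the request and reassigns its xid) before it
 * is sent and releases the slot once the reply has been handled.
 * ptlrpc_queue_wait() stands in for whatever send path the caller uses.
 */
static int example_send_modifying_rpc(struct ptlrpc_request *req)
{
	int rc;

	ptlrpc_get_mod_rpc_slot(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_put_mod_rpc_slot(req);	/* frees the tag for reuse */

	return rc;
}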
798
799 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
800                              __u32 version, int opcode, char **bufs,
801                              struct ptlrpc_cli_ctx *ctx)
802 {
803         int count;
804         struct obd_import *imp;
805         __u32 *lengths;
806         int rc;
807
808         ENTRY;
809
810         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
811         imp = request->rq_import;
812         lengths = request->rq_pill.rc_area[RCL_CLIENT];
813
814         if (ctx) {
815                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
816         } else {
817                 rc = sptlrpc_req_get_ctx(request);
818                 if (rc)
819                         GOTO(out_free, rc);
820         }
821         sptlrpc_req_set_flavor(request, opcode);
822
823         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
824                                  lengths, bufs);
825         if (rc)
826                 GOTO(out_ctx, rc);
827
828         lustre_msg_add_version(request->rq_reqmsg, version);
829         request->rq_send_state = LUSTRE_IMP_FULL;
830         request->rq_type = PTL_RPC_MSG_REQUEST;
831
832         request->rq_req_cbid.cbid_fn  = request_out_callback;
833         request->rq_req_cbid.cbid_arg = request;
834
835         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
836         request->rq_reply_cbid.cbid_arg = request;
837
838         request->rq_reply_deadline = 0;
839         request->rq_bulk_deadline = 0;
840         request->rq_req_deadline = 0;
841         request->rq_phase = RQ_PHASE_NEW;
842         request->rq_next_phase = RQ_PHASE_UNDEFINED;
843
844         request->rq_request_portal = imp->imp_client->cli_request_portal;
845         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
846
847         ptlrpc_at_set_req_timeout(request);
848
849         lustre_msg_set_opc(request->rq_reqmsg, opcode);
850
851         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
852         if (cfs_fail_val == opcode) {
853                 time64_t *fail_t = NULL, *fail2_t = NULL;
854
855                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
856                         fail_t = &request->rq_bulk_deadline;
857                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
858                         fail_t = &request->rq_reply_deadline;
859                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
860                         fail_t = &request->rq_req_deadline;
861                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
862                         fail_t = &request->rq_reply_deadline;
863                         fail2_t = &request->rq_bulk_deadline;
864                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
865                         time64_t now = ktime_get_real_seconds();
866                         u64 xid = ((u64)now >> 4) << 24;
867
868                         atomic64_set(&ptlrpc_last_xid, xid);
869                 }
870
871                 if (fail_t) {
872                         *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
873
874                         if (fail2_t)
875                                 *fail2_t = ktime_get_real_seconds() +
876                                            LONG_UNLINK;
877
878                         /*
879                          * The RPC is infected; let the test change the
880                          * fail_loc
881                          */
882                         msleep(4 * MSEC_PER_SEC);
883                 }
884         }
885         ptlrpc_assign_next_xid(request);
886
887         RETURN(0);
888
889 out_ctx:
890         LASSERT(!request->rq_pool);
891         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
892 out_free:
893         class_import_put(imp);
894
895         return rc;
896 }
897 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
898
899 /**
900  * Pack request buffers for network transfer, performing any encryption
901  * steps that are needed.
902  */
903 int ptlrpc_request_pack(struct ptlrpc_request *request,
904                         __u32 version, int opcode)
905 {
906         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
907 }
908 EXPORT_SYMBOL(ptlrpc_request_pack);
909
910 /**
911  * Helper function to allocate a new request on import \a imp,
912  * possibly using an existing request from pool \a pool if provided.
913  * Returns allocated request structure with import field filled or
914  * NULL on error.
915  */
916 static inline
917 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
918                                               struct ptlrpc_request_pool *pool)
919 {
920         struct ptlrpc_request *request = NULL;
921
922         request = ptlrpc_request_cache_alloc(GFP_NOFS);
923
924         if (!request && pool)
925                 request = ptlrpc_prep_req_from_pool(pool);
926
927         if (request) {
928                 ptlrpc_cli_req_init(request);
929
930                 LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
931                 LASSERT(imp != LP_POISON);
932                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
933                          imp->imp_client);
934                 LASSERT(imp->imp_client != LP_POISON);
935
936                 request->rq_import = class_import_get(imp);
937         } else {
938                 CERROR("request allocation out of memory\n");
939         }
940
941         return request;
942 }
943
944 /**
945  * Helper function for creating a request.
946  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
947  * buffer structures according to capsule template \a format.
948  * Returns allocated request structure pointer or NULL on error.
949  */
950 static struct ptlrpc_request *
951 ptlrpc_request_alloc_internal(struct obd_import *imp,
952                               struct ptlrpc_request_pool *pool,
953                               const struct req_format *format)
954 {
955         struct ptlrpc_request *request;
956
957         request = __ptlrpc_request_alloc(imp, pool);
958         if (!request)
959                 return NULL;
960
961         /*
962          * initiate connection if needed when the import has been
963          * referenced by the new request to avoid races with disconnect
964          */
965         if (unlikely(imp->imp_state == LUSTRE_IMP_IDLE)) {
966                 int rc;
967
968                 CDEBUG_LIMIT(imp->imp_idle_debug,
969                              "%s: reconnect after %llds idle\n",
970                              imp->imp_obd->obd_name, ktime_get_real_seconds() -
971                                                      imp->imp_last_reply_time);
972                 spin_lock(&imp->imp_lock);
973                 if (imp->imp_state == LUSTRE_IMP_IDLE) {
974                         imp->imp_generation++;
975                         imp->imp_initiated_at = imp->imp_generation;
976                         imp->imp_state = LUSTRE_IMP_NEW;
977
978                         /* connect_import_locked releases imp_lock */
979                         rc = ptlrpc_connect_import_locked(imp);
980                         if (rc < 0) {
981                                 ptlrpc_request_free(request);
982                                 return NULL;
983                         }
984                         ptlrpc_pinger_add_import(imp);
985                 } else {
986                         spin_unlock(&imp->imp_lock);
987                 }
988         }
989
990         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
991         req_capsule_set(&request->rq_pill, format);
992         return request;
993 }
994
995 /**
996  * Allocate new request structure for import \a imp and initialize its
997  * buffer structure according to capsule template \a format.
998  */
999 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1000                                             const struct req_format *format)
1001 {
1002         return ptlrpc_request_alloc_internal(imp, NULL, format);
1003 }
1004 EXPORT_SYMBOL(ptlrpc_request_alloc);
1005
1006 /**
1007  * Allocate new request structure for import \a imp from pool \a pool and
1008  * initialize its buffer structure according to capsule template \a format.
1009  */
1010 struct ptlrpc_request *
1011 ptlrpc_request_alloc_pool(struct obd_import *imp,
1012                           struct ptlrpc_request_pool *pool,
1013                           const struct req_format *format)
1014 {
1015         return ptlrpc_request_alloc_internal(imp, pool, format);
1016 }
1017 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1018
1019 /**
1020  * For requests not from pool, free memory of the request structure.
1021  * For requests obtained from a pool earlier, return request back to pool.
1022  */
1023 void ptlrpc_request_free(struct ptlrpc_request *request)
1024 {
1025         if (request->rq_pool)
1026                 __ptlrpc_free_req_to_pool(request);
1027         else
1028                 ptlrpc_request_cache_free(request);
1029 }
1030 EXPORT_SYMBOL(ptlrpc_request_free);
1031
1032 /**
1033  * Allocate a new request for operation \a opcode and immediately pack it for
1034  * network transfer.
1035  * Only used for simple requests like OBD_PING where the only important
1036  * part of the request is operation itself.
1037  * Returns allocated request or NULL on error.
1038  */
1039 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1040                                                  const struct req_format *format,
1041                                                  __u32 version, int opcode)
1042 {
1043         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1044         int rc;
1045
1046         if (req) {
1047                 rc = ptlrpc_request_pack(req, version, opcode);
1048                 if (rc) {
1049                         ptlrpc_request_free(req);
1050                         req = NULL;
1051                 }
1052         }
1053         return req;
1054 }
1055 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
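/*
 * Illustrative sketch, not part of the original file: the "simple request"
 * case described above - allocating and packing an OBD_PING in one call,
 * much like the pinger does.  RQF_OBD_PING and LUSTRE_OBD_VERSION are the
 * format/version normally used for pings; error handling is reduced to the
 * NULL check.
 */
static struct ptlrpc_request *example_prep_ping(struct obd_import *imp)
{
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	if (req != NULL) {
		ptlrpc_request_set_replen(req);
		req->rq_no_resend = req->rq_no_delay = 1;
	}
	return req;
}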
1056
1057 /**
1058  * Allocate and initialize new request set structure on the current CPT.
1059  * Returns a pointer to the newly allocated set structure or NULL on error.
1060  */
1061 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1062 {
1063         struct ptlrpc_request_set *set;
1064         int cpt;
1065
1066         ENTRY;
1067         cpt = cfs_cpt_current(cfs_cpt_table, 0);
1068         OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof(*set));
1069         if (!set)
1070                 RETURN(NULL);
1071         atomic_set(&set->set_refcount, 1);
1072         INIT_LIST_HEAD(&set->set_requests);
1073         init_waitqueue_head(&set->set_waitq);
1074         atomic_set(&set->set_new_count, 0);
1075         atomic_set(&set->set_remaining, 0);
1076         spin_lock_init(&set->set_new_req_lock);
1077         INIT_LIST_HEAD(&set->set_new_requests);
1078         set->set_max_inflight = UINT_MAX;
1079         set->set_producer     = NULL;
1080         set->set_producer_arg = NULL;
1081         set->set_rc           = 0;
1082
1083         RETURN(set);
1084 }
1085 EXPORT_SYMBOL(ptlrpc_prep_set);
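/*
 * Illustrative sketch, not part of the original file: the typical life cycle
 * of a request set - create it, add an already packed request (the set takes
 * over the caller's reference), wait for completion, then destroy the set.
 * The (env, set) form of ptlrpc_set_wait() is an assumption; older releases
 * take only the set.
 */
static int example_send_via_set(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();
	if (set == NULL)
		return -ENOMEM;

	ptlrpc_set_add_req(set, req);
	rc = ptlrpc_set_wait(NULL, set);
	ptlrpc_set_destroy(set);	/* drops the request reference */

	return rc;
}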
1086
1087 /**
1088  * Allocate and initialize new request set structure with flow control
1089  * extension. This extension allows controlling the number of requests in flight
1090  * for the whole set. A callback function to generate requests must be provided
1091  * and the request set will keep the number of requests sent over the wire to
1092  * @max_inflight.
1093  * Returns a pointer to the newly allocated set structure or NULL on error.
1094  */
1095 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1096                                              void *arg)
1097
1098 {
1099         struct ptlrpc_request_set *set;
1100
1101         set = ptlrpc_prep_set();
1102         if (!set)
1103                 RETURN(NULL);
1104
1105         set->set_max_inflight  = max;
1106         set->set_producer      = func;
1107         set->set_producer_arg  = arg;
1108
1109         RETURN(set);
1110 }
1111
1112 /**
1113  * Wind down and free request set structure previously allocated with
1114  * ptlrpc_prep_set.
1115  * Ensures that all requests on the set have completed and removes
1116  * all requests from the request list in a set.
1117  * If any unsent requests happen to be on the list, pretends that they got
1118  * an error in flight and calls their completion handlers.
1119  */
1120 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1121 {
1122         struct list_head *tmp;
1123         struct list_head *next;
1124         int expected_phase;
1125         int n = 0;
1126
1127         ENTRY;
1128
1129         /* Requests on the set should either all be completed, or all be new */
1130         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1131                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1132         list_for_each(tmp, &set->set_requests) {
1133                 struct ptlrpc_request *req =
1134                         list_entry(tmp, struct ptlrpc_request,
1135                                    rq_set_chain);
1136
1137                 LASSERT(req->rq_phase == expected_phase);
1138                 n++;
1139         }
1140
1141         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1142                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1143                  atomic_read(&set->set_remaining), n);
1144
1145         list_for_each_safe(tmp, next, &set->set_requests) {
1146                 struct ptlrpc_request *req =
1147                         list_entry(tmp, struct ptlrpc_request,
1148                                    rq_set_chain);
1149                 list_del_init(&req->rq_set_chain);
1150
1151                 LASSERT(req->rq_phase == expected_phase);
1152
1153                 if (req->rq_phase == RQ_PHASE_NEW) {
1154                         ptlrpc_req_interpret(NULL, req, -EBADR);
1155                         atomic_dec(&set->set_remaining);
1156                 }
1157
1158                 spin_lock(&req->rq_lock);
1159                 req->rq_set = NULL;
1160                 req->rq_invalid_rqset = 0;
1161                 spin_unlock(&req->rq_lock);
1162
1163                 ptlrpc_req_finished(req);
1164         }
1165
1166         LASSERT(atomic_read(&set->set_remaining) == 0);
1167
1168         ptlrpc_reqset_put(set);
1169         EXIT;
1170 }
1171 EXPORT_SYMBOL(ptlrpc_set_destroy);
1172
1173 /**
1174  * Add a new request to the general purpose request set.
1175  * Assumes request reference from the caller.
1176  */
1177 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1178                         struct ptlrpc_request *req)
1179 {
1180         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1181         LASSERT(list_empty(&req->rq_set_chain));
1182
1183         if (req->rq_allow_intr)
1184                 set->set_allow_intr = 1;
1185
1186         /* The set takes over the caller's request reference */
1187         list_add_tail(&req->rq_set_chain, &set->set_requests);
1188         req->rq_set = set;
1189         atomic_inc(&set->set_remaining);
1190         req->rq_queued_time = ktime_get_seconds();
1191
1192         if (req->rq_reqmsg)
1193                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1194
1195         if (set->set_producer)
1196                 /*
1197                  * If the request set has a producer callback, the RPC must be
1198                  * sent straight away
1199                  */
1200                 ptlrpc_send_new_req(req);
1201 }
1202 EXPORT_SYMBOL(ptlrpc_set_add_req);
1203
1204 /**
1205  * Add a request to a request set with a dedicated server thread
1206  * and wake the thread to do any necessary processing.
1207  * Currently only used for ptlrpcd.
1208  */
1209 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1210                             struct ptlrpc_request *req)
1211 {
1212         struct ptlrpc_request_set *set = pc->pc_set;
1213         int count, i;
1214
1215         LASSERT(req->rq_set == NULL);
1216         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1217
1218         spin_lock(&set->set_new_req_lock);
1219         /*
1220          * The set takes over the caller's request reference.
1221          */
1222         req->rq_set = set;
1223         req->rq_queued_time = ktime_get_seconds();
1224         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1225         count = atomic_inc_return(&set->set_new_count);
1226         spin_unlock(&set->set_new_req_lock);
1227
1228         /* Only need to call wakeup once for the first entry. */
1229         if (count == 1) {
1230                 wake_up(&set->set_waitq);
1231
1232                 /*
1233                  * XXX: It may be unnecessary to wake up all the partners. But
1234                  *      to guarantee the async RPC can be processed ASAP, we
1235                  *      have no better choice. This may be fixed in the future.
1236                  */
1237                 for (i = 0; i < pc->pc_npartners; i++)
1238                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1239         }
1240 }
1241
1242 /**
1243  * Based on the current state of the import, determine if the request
1244  * can be sent, is an error, or should be delayed.
1245  *
1246  * Returns true if this request should be delayed. If false and
1247  * *status is set, then the request cannot be sent and *status is the
1248  * error code.  If false and *status is 0, then the request can be sent.
1249  *
1250  * The imp->imp_lock must be held.
1251  */
1252 static int ptlrpc_import_delay_req(struct obd_import *imp,
1253                                    struct ptlrpc_request *req, int *status)
1254 {
1255         int delay = 0;
1256
1257         ENTRY;
1258         LASSERT(status);
1259         *status = 0;
1260
1261         if (req->rq_ctx_init || req->rq_ctx_fini) {
1262                 /* always allow ctx init/fini rpc go through */
1263         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1264                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1265                 *status = -EIO;
1266         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1267                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1268
1269                 /*
1270                  * pings or MDS-equivalent STATFS may safely
1271                  * race with umount
1272                  */
1273                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1274                           D_HA : D_ERROR, req, "IMP_CLOSED");
1275                 *status = -EIO;
1276         } else if (ptlrpc_send_limit_expired(req)) {
1277                 /* probably doesn't need to be a D_ERROR after initial testing */
1278                 DEBUG_REQ(D_HA, req, "send limit expired");
1279                 *status = -ETIMEDOUT;
1280         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1281                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1282                 ;/* allow CONNECT even if import is invalid */
1283                 if (atomic_read(&imp->imp_inval_count) != 0) {
1284                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1285                         *status = -EIO;
1286                 }
1287         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1288                 if (!imp->imp_deactive)
1289                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1290                 *status = -ESHUTDOWN; /* b=12940 */
1291         } else if (req->rq_import_generation != imp->imp_generation) {
1292                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
1293                 *status = -EIO;
1294         } else if (req->rq_send_state != imp->imp_state) {
1295                 /* invalidate in progress - any requests should be dropped */
1296                 if (atomic_read(&imp->imp_inval_count) != 0) {
1297                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1298                         *status = -EIO;
1299                 } else if (req->rq_no_delay &&
1300                            imp->imp_generation != imp->imp_initiated_at) {
1301                         /* ignore nodelay for requests initiating connections */
1302                         *status = -EWOULDBLOCK;
1303                 } else if (req->rq_allow_replay &&
1304                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1305                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1306                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1307                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1308                         DEBUG_REQ(D_HA, req, "allow during recovery");
1309                 } else {
1310                         delay = 1;
1311                 }
1312         }
1313
1314         RETURN(delay);
1315 }
1316
1317 /**
1318  * Decide if the error message should be printed to the console or not.
1319  * Makes its decision based on request type, status, and failure frequency.
1320  *
1321  * \param[in] req  request that failed and may need a console message
1322  *
1323  * \retval false if no message should be printed
1324  * \retval true  if console message should be printed
1325  */
1326 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1327 {
1328         LASSERT(req->rq_reqmsg != NULL);
1329
1330         /* Suppress particular reconnect errors which are to be expected. */
1331         if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
1332                 /* Suppress timed out reconnect requests */
1333                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1334                     req->rq_timedout)
1335                         return false;
1336
1337                 /*
1338                  * Suppress most unavailable/again reconnect requests, but
1339                  * print occasionally so it is clear the client is trying to
1340                  * connect to a server where no target is running.
1341                  */
1342                 if ((err == -ENODEV || err == -EAGAIN) &&
1343                     req->rq_import->imp_conn_cnt % 30 != 20)
1344                         return false;
1345         }
1346
1347         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1348                 /* -EAGAIN is normal when using POSIX flocks */
1349                 return false;
1350
1351         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1352             (req->rq_xid & 0xf) != 10)
1353                 /* Suppress most ping requests, they may fail occasionally */
1354                 return false;
1355
1356         return true;
1357 }
1358
1359 /**
1360  * Check request processing status.
1361  * Returns the status.
1362  */
1363 static int ptlrpc_check_status(struct ptlrpc_request *req)
1364 {
1365         int rc;
1366
1367         ENTRY;
1368         rc = lustre_msg_get_status(req->rq_repmsg);
1369         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1370                 struct obd_import *imp = req->rq_import;
1371                 lnet_nid_t nid = imp->imp_connection->c_peer.nid;
1372                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1373
1374                 if (ptlrpc_console_allow(req, opc, rc))
1375                         LCONSOLE_ERROR_MSG(0x11,
1376                                            "%s: operation %s to node %s failed: rc = %d\n",
1377                                            imp->imp_obd->obd_name,
1378                                            ll_opcode2str(opc),
1379                                            libcfs_nid2str(nid), rc);
1380                 RETURN(rc < 0 ? rc : -EINVAL);
1381         }
1382
1383         if (rc)
1384                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1385
1386         RETURN(rc);
1387 }
1388
1389 /**
1390  * Save pre-versions of objects into the request for replay.
1391  * Versions are obtained from the server reply.
1392  * Used for VBR (version-based recovery).
1393  */
1394 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1395 {
1396         struct lustre_msg *repmsg = req->rq_repmsg;
1397         struct lustre_msg *reqmsg = req->rq_reqmsg;
1398         __u64 *versions = lustre_msg_get_versions(repmsg);
1399
1400         ENTRY;
1401         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1402                 return;
1403
1404         LASSERT(versions);
1405         lustre_msg_set_versions(reqmsg, versions);
1406         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1407                versions[0], versions[1]);
1408
1409         EXIT;
1410 }
1411
1412 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1413 {
1414         struct ptlrpc_request *req;
1415
1416         assert_spin_locked(&imp->imp_lock);
1417         if (list_empty(&imp->imp_unreplied_list))
1418                 return 0;
1419
1420         req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
1421                          rq_unreplied_list);
1422         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1423
1424         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1425                 imp->imp_known_replied_xid = req->rq_xid - 1;
1426
1427         return req->rq_xid - 1;
1428 }
1429
1430 /**
1431  * Callback function called when client receives RPC reply for \a req.
1432  * Returns 0 on success or an error code.
1433  * The return value is assigned to req->rq_status by the caller
1434  * as the request processing status.
1435  * This function also decides if the request needs to be saved for later replay.
1436  */
1437 static int after_reply(struct ptlrpc_request *req)
1438 {
1439         struct obd_import *imp = req->rq_import;
1440         struct obd_device *obd = req->rq_import->imp_obd;
1441         ktime_t work_start;
1442         u64 committed;
1443         s64 timediff;
1444         int rc;
1445
1446         ENTRY;
1447         LASSERT(obd != NULL);
1448         /* repbuf must be unlinked */
1449         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1450
1451         if (req->rq_reply_truncated) {
1452                 if (ptlrpc_no_resend(req)) {
1453                         DEBUG_REQ(D_ERROR, req,
1454                                   "reply buffer overflow, expected=%d, actual size=%d",
1455                                   req->rq_nob_received, req->rq_repbuf_len);
1456                         RETURN(-EOVERFLOW);
1457                 }
1458
1459                 sptlrpc_cli_free_repbuf(req);
1460                 /*
1461                  * Pass the required reply buffer size (including
1462                  * space for the early reply).
1463                  * NB: no need to round up because alloc_repbuf
1464                  * will round it up
1465                  */
1466                 req->rq_replen = req->rq_nob_received;
1467                 req->rq_nob_received = 0;
1468                 spin_lock(&req->rq_lock);
1469                 req->rq_resend       = 1;
1470                 spin_unlock(&req->rq_lock);
1471                 RETURN(0);
1472         }
1473
1474         work_start = ktime_get_real();
1475         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1476
1477         /*
1478          * NB Until this point, the whole of the incoming message,
1479          * including buflens, status etc is in the sender's byte order.
1480          */
1481         rc = sptlrpc_cli_unwrap_reply(req);
1482         if (rc) {
1483                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1484                 RETURN(rc);
1485         }
1486
1487         /*
1488          * Security layer unwrap might ask to resend this request.
1489          */
1490         if (req->rq_resend)
1491                 RETURN(0);
1492
1493         rc = unpack_reply(req);
1494         if (rc)
1495                 RETURN(rc);
1496
1497         /* retry indefinitely on EINPROGRESS */
1498         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1499             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1500                 time64_t now = ktime_get_real_seconds();
1501
1502                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1503                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1504                 spin_lock(&req->rq_lock);
1505                 req->rq_resend = 1;
1506                 spin_unlock(&req->rq_lock);
1507                 req->rq_nr_resend++;
1508
1509                 /* Readjust the timeout for current conditions */
1510                 ptlrpc_at_set_req_timeout(req);
1511                 /*
1512                  * Delay the resend to give the server a chance to get ready.
1513                  * The delay is increased by 1s on every resend and is capped at
1514                  * the current request timeout (i.e. obd_timeout if AT is off,
1515                  * or AT service time x 125% + 5s, see at_est2timeout)
1516                  */
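                /*
                 * Worked example (illustrative): with a 10s request timeout
                 * the resend delays grow 1s, 2s, 3s, ... and stay capped at
                 * 10s for any further EINPROGRESS resends.
                 */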
1517                 if (req->rq_nr_resend > req->rq_timeout)
1518                         req->rq_sent = now + req->rq_timeout;
1519                 else
1520                         req->rq_sent = now + req->rq_nr_resend;
1521
1522                 /* Resend for EINPROGRESS will use a new XID */
1523                 spin_lock(&imp->imp_lock);
1524                 list_del_init(&req->rq_unreplied_list);
1525                 spin_unlock(&imp->imp_lock);
1526
1527                 RETURN(0);
1528         }
1529
1530         if (obd->obd_svc_stats) {
1531                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1532                                     timediff);
1533                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1534         }
1535
1536         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1537             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1538                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1539                           lustre_msg_get_type(req->rq_repmsg));
1540                 RETURN(-EPROTO);
1541         }
1542
1543         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1544                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
1545         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1546         ptlrpc_at_adj_net_latency(req,
1547                                   lustre_msg_get_service_time(req->rq_repmsg));
1548
1549         rc = ptlrpc_check_status(req);
1550
1551         if (rc) {
1552                 /*
1553                  * Either we've been evicted, or the server has failed for
1554                  * some reason. Try to reconnect, and if that fails, punt to
1555                  * the upcall.
1556                  */
1557                 if (ptlrpc_recoverable_error(rc)) {
1558                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1559                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1560                                 RETURN(rc);
1561                         }
1562                         ptlrpc_request_handle_notconn(req);
1563                         RETURN(rc);
1564                 }
1565         } else {
1566                 /*
1567                  * Check whether the server sent an SLV (server lock volume).
1568                  * Do it only for RPCs with rc == 0.
1569                  */
1570                 ldlm_cli_update_pool(req);
1571         }
1572
1573         /*
1574          * Store transno in reqmsg for replay.
1575          */
1576         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1577                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1578                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1579         }
1580
1581         if (imp->imp_replayable) {
1582                 spin_lock(&imp->imp_lock);
1583                 /*
1584                  * No point in adding already-committed requests to the replay
1585                  * list, we will just remove them immediately. b=9829
1586                  */
1587                 if (req->rq_transno != 0 &&
1588                     (req->rq_transno >
1589                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1590                      req->rq_replay)) {
1591                         /** version recovery */
1592                         ptlrpc_save_versions(req);
1593                         ptlrpc_retain_replayable_request(req, imp);
1594                 } else if (req->rq_commit_cb &&
1595                            list_empty(&req->rq_replay_list)) {
1596                         /*
1597                          * NB: don't call rq_commit_cb if it's already on
1598                          * rq_replay_list, ptlrpc_free_committed() will call
1599                          * it later, see LU-3618 for details
1600                          */
1601                         spin_unlock(&imp->imp_lock);
1602                         req->rq_commit_cb(req);
1603                         spin_lock(&imp->imp_lock);
1604                 }
1605
1606                 /*
1607                  * Replay-enabled imports return commit-status information.
1608                  */
1609                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1610                 if (likely(committed > imp->imp_peer_committed_transno))
1611                         imp->imp_peer_committed_transno = committed;
1612
1613                 ptlrpc_free_committed(imp);
1614
1615                 if (!list_empty(&imp->imp_replay_list)) {
1616                         struct ptlrpc_request *last;
1617
1618                         last = list_entry(imp->imp_replay_list.prev,
1619                                           struct ptlrpc_request,
1620                                           rq_replay_list);
1621                         /*
1622                          * Requests with rq_replay stay on the list even if no
1623                          * commit is expected.
1624                          */
1625                         if (last->rq_transno > imp->imp_peer_committed_transno)
1626                                 ptlrpc_pinger_commit_expected(imp);
1627                 }
1628
1629                 spin_unlock(&imp->imp_lock);
1630         }
1631
1632         RETURN(rc);
1633 }
1634
1635 /**
1636  * Helper function to send request \a req over the network for the first time.
1637  * Also adjusts request phase.
1638  * Returns 0 on success or error code.
1639  */
1640 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1641 {
1642         struct obd_import *imp = req->rq_import;
1643         __u64 min_xid = 0;
1644         int rc;
1645
1646         ENTRY;
1647         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1648
1649         /* do not try to go further if there is not enough memory in enc_pool */
1650         if (req->rq_sent && req->rq_bulk)
1651                 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1652                     pool_is_at_full_capacity())
1653                         RETURN(-ENOMEM);
1654
1655         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1656             (!req->rq_generation_set ||
1657              req->rq_import_generation == imp->imp_generation))
1658                 RETURN(0);
1659
1660         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1661
1662         spin_lock(&imp->imp_lock);
1663
1664         LASSERT(req->rq_xid != 0);
1665         LASSERT(!list_empty(&req->rq_unreplied_list));
1666
1667         if (!req->rq_generation_set)
1668                 req->rq_import_generation = imp->imp_generation;
1669
1670         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1671                 spin_lock(&req->rq_lock);
1672                 req->rq_waiting = 1;
1673                 spin_unlock(&req->rq_lock);
1674
1675                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1676                           ptlrpc_import_state_name(req->rq_send_state),
1677                           ptlrpc_import_state_name(imp->imp_state));
1678                 LASSERT(list_empty(&req->rq_list));
1679                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1680                 atomic_inc(&req->rq_import->imp_inflight);
1681                 spin_unlock(&imp->imp_lock);
1682                 RETURN(0);
1683         }
1684
1685         if (rc != 0) {
1686                 spin_unlock(&imp->imp_lock);
1687                 req->rq_status = rc;
1688                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1689                 RETURN(rc);
1690         }
1691
1692         LASSERT(list_empty(&req->rq_list));
1693         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1694         atomic_inc(&req->rq_import->imp_inflight);
1695
1696         /*
1697          * Find the known replied XID from the unreplied list. CONNECT
1698          * and DISCONNECT requests are skipped to keep the sanity check
1699          * on the server side happy; see process_req_last_xid().
1700          *
1701          * For CONNECT: because replay requests have lower XIDs, the
1702          * sanity check would break if CONNECT bumped exp_last_xid on
1703          * the server.
1704          *
1705          * For DISCONNECT: since the client aborts inflight RPCs before
1706          * sending DISCONNECT, the DISCONNECT may carry an XID higher
1707          * than the inflight RPCs.
1708          */
1709         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1710                 min_xid = ptlrpc_known_replied_xid(imp);
1711         spin_unlock(&imp->imp_lock);
1712
1713         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1714
1715         lustre_msg_set_status(req->rq_reqmsg, current_pid());
1716
1717         rc = sptlrpc_req_refresh_ctx(req, -1);
1718         if (rc) {
1719                 if (req->rq_err) {
1720                         req->rq_status = rc;
1721                         RETURN(1);
1722                 } else {
1723                         spin_lock(&req->rq_lock);
1724                         req->rq_wait_ctx = 1;
1725                         spin_unlock(&req->rq_lock);
1726                         RETURN(0);
1727                 }
1728         }
1729
1730         CDEBUG(D_RPCTRACE,
1731                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1732                req, current_comm(),
1733                imp->imp_obd->obd_uuid.uuid,
1734                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1735                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1736                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1737
1738         rc = ptl_send_rpc(req, 0);
1739         if (rc == -ENOMEM) {
1740                 spin_lock(&imp->imp_lock);
1741                 if (!list_empty(&req->rq_list)) {
1742                         list_del_init(&req->rq_list);
1743                         atomic_dec(&req->rq_import->imp_inflight);
1744                 }
1745                 spin_unlock(&imp->imp_lock);
1746                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1747                 RETURN(rc);
1748         }
1749         if (rc) {
1750                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1751                           rc);
1752                 spin_lock(&req->rq_lock);
1753                 req->rq_net_err = 1;
1754                 spin_unlock(&req->rq_lock);
1755                 RETURN(rc);
1756         }
1757         RETURN(0);
1758 }
1759
1760 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1761 {
1762         int remaining, rc;
1763
1764         ENTRY;
1765         LASSERT(set->set_producer != NULL);
1766
1767         remaining = atomic_read(&set->set_remaining);
1768
1769         /*
1770          * populate the ->set_requests list with requests until we
1771          * reach the maximum number of RPCs in flight for this set
1772          */
1773         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1774                 rc = set->set_producer(set, set->set_producer_arg);
1775                 if (rc == -ENOENT) {
1776                         /* no more RPC to produce */
1777                         set->set_producer     = NULL;
1778                         set->set_producer_arg = NULL;
1779                         RETURN(0);
1780                 }
1781         }
1782
1783         RETURN((atomic_read(&set->set_remaining) - remaining));
1784 }
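
/*
 * Illustrative sketch of a producer callback (not part of the original
 * source; "my_ctx" and "my_prep_next_req" are hypothetical).  The contract
 * visible in ptlrpc_set_producer() is: add one new request to the set via
 * ptlrpc_set_add_req() and return 0, or return -ENOENT once there is
 * nothing left to produce.
 *
 *	static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *		struct ptlrpc_request *req;
 *
 *		if (ctx->next == ctx->count)
 *			return -ENOENT;		// no more RPCs to produce
 *
 *		req = my_prep_next_req(ctx);	// hypothetical helper
 *		if (req == NULL)
 *			return -ENOENT;		// treat failure as "done"
 *
 *		ptlrpc_set_add_req(set, req);
 *		ctx->next++;
 *		return 0;
 *	}
 */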
1785
1786 /**
1787  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1788  * and no more replies are expected.
1789  * (It is possible to get fewer replies than requests sent, e.g. due to
1790  * timed-out requests or requests that we had trouble sending out.)
1791  *
1792  * NOTE: This function contains a potential schedule point (cond_resched()).
1793  */
1794 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1795 {
1796         struct list_head *tmp, *next;
1797         struct list_head  comp_reqs;
1798         int force_timer_recalc = 0;
1799
1800         ENTRY;
1801         if (atomic_read(&set->set_remaining) == 0)
1802                 RETURN(1);
1803
1804         INIT_LIST_HEAD(&comp_reqs);
1805         list_for_each_safe(tmp, next, &set->set_requests) {
1806                 struct ptlrpc_request *req =
1807                         list_entry(tmp, struct ptlrpc_request,
1808                                    rq_set_chain);
1809                 struct obd_import *imp = req->rq_import;
1810                 int unregistered = 0;
1811                 int async = 1;
1812                 int rc = 0;
1813
1814                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1815                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1816                         continue;
1817                 }
1818
1819                 /*
1820                  * This schedule point is mainly for the ptlrpcd caller of this
1821                  * function.  Most ptlrpc sets are neither long-lived nor unbounded
1822                  * in length, but the set used by the ptlrpcd is.
1823                  * Since the processing time is unbounded, we need to insert an
1824                  * explicit schedule point to make the thread well-behaved.
1825                  */
1826                 cond_resched();
1827
1828                 /*
1829                  * If the caller allows the request to be interrupted and it
1830                  * has in fact been interrupted, then move the request to the
1831                  * RQ_PHASE_INTERPRET phase regardless of what the current
1832                  * phase is.
1833                  */
1834                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1835                         req->rq_status = -EINTR;
1836                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1837
1838                         /*
1839                          * Since it is being interpreted and we have to wait
1840                          * for the reply to be unlinked, use sync mode.
1841                          */
1842                         async = 0;
1843
1844                         GOTO(interpret, req->rq_status);
1845                 }
1846
1847                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1848                         force_timer_recalc = 1;
1849
1850                 /* delayed send - skip */
1851                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1852                         continue;
1853
1854                 /* delayed resend - skip */
1855                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1856                     req->rq_sent > ktime_get_real_seconds())
1857                         continue;
1858
1859                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1860                       req->rq_phase == RQ_PHASE_BULK ||
1861                       req->rq_phase == RQ_PHASE_INTERPRET ||
1862                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1863                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1864                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1865                         LBUG();
1866                 }
1867
1868                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1869                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1870                         LASSERT(req->rq_next_phase != req->rq_phase);
1871                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1872
1873                         if (req->rq_req_deadline &&
1874                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1875                                 req->rq_req_deadline = 0;
1876                         if (req->rq_reply_deadline &&
1877                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1878                                 req->rq_reply_deadline = 0;
1879                         if (req->rq_bulk_deadline &&
1880                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1881                                 req->rq_bulk_deadline = 0;
1882
1883                         /*
1884                          * Skip processing until the reply is unlinked. We
1885                          * can't return the request to the pool before that
1886                          * and we can't call interpret before that. We need
1887                          * to make sure that all RDMA transfers have finished
1888                          * and will not corrupt any data.
1889                          */
1890                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1891                             ptlrpc_client_recv_or_unlink(req))
1892                                 continue;
1893                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1894                             ptlrpc_client_bulk_active(req))
1895                                 continue;
1896
1897                         /*
1898                          * Turn fail_loc off to prevent it from looping
1899                          * forever.
1900                          */
1901                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1902                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1903                                                      OBD_FAIL_ONCE);
1904                         }
1905                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1906                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1907                                                      OBD_FAIL_ONCE);
1908                         }
1909
1910                         /*
1911                          * Move to next phase if reply was successfully
1912                          * unlinked.
1913                          */
1914                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1915                 }
1916
1917                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1918                         GOTO(interpret, req->rq_status);
1919
1920                 /*
1921                  * Note that this also will start async reply unlink.
1922                  */
1923                 if (req->rq_net_err && !req->rq_timedout) {
1924                         ptlrpc_expire_one_request(req, 1);
1925
1926                         /*
1927                          * Check if we still need to wait for unlink.
1928                          */
1929                         if (ptlrpc_client_recv_or_unlink(req) ||
1930                             ptlrpc_client_bulk_active(req))
1931                                 continue;
1932                         /* If there is no need to resend, fail it now. */
1933                         if (req->rq_no_resend) {
1934                                 if (req->rq_status == 0)
1935                                         req->rq_status = -EIO;
1936                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1937                                 GOTO(interpret, req->rq_status);
1938                         } else {
1939                                 continue;
1940                         }
1941                 }
1942
1943                 if (req->rq_err) {
1944                         spin_lock(&req->rq_lock);
1945                         req->rq_replied = 0;
1946                         spin_unlock(&req->rq_lock);
1947                         if (req->rq_status == 0)
1948                                 req->rq_status = -EIO;
1949                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1950                         GOTO(interpret, req->rq_status);
1951                 }
1952
1953                 /*
1954                  * ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
1955                  * so it sets rq_intr regardless of individual rpc
1956                  * timeouts. The synchronous IO waiting path sets
1957                  * rq_intr irrespective of whether ptlrpcd
1958                  * has seen a timeout.  Our policy is to only interpret
1959                  * interrupted rpcs after they have timed out, so we
1960                  * need to enforce that here.
1961                  */
1962
1963                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1964                                      req->rq_wait_ctx)) {
1965                         req->rq_status = -EINTR;
1966                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1967                         GOTO(interpret, req->rq_status);
1968                 }
1969
1970                 if (req->rq_phase == RQ_PHASE_RPC) {
1971                         if (req->rq_timedout || req->rq_resend ||
1972                             req->rq_waiting || req->rq_wait_ctx) {
1973                                 int status;
1974
1975                                 if (!ptlrpc_unregister_reply(req, 1)) {
1976                                         ptlrpc_unregister_bulk(req, 1);
1977                                         continue;
1978                                 }
1979
1980                                 spin_lock(&imp->imp_lock);
1981                                 if (ptlrpc_import_delay_req(imp, req,
1982                                                             &status)) {
1983                                         /*
1984                                          * Put it on the delayed list - only while
1985                                          * we wait for recovery to finish - before sending.
1986                                          */
1987                                         list_move_tail(&req->rq_list,
1988                                                        &imp->imp_delayed_list);
1989                                         spin_unlock(&imp->imp_lock);
1990                                         continue;
1991                                 }
1992
1993                                 if (status != 0)  {
1994                                         req->rq_status = status;
1995                                         ptlrpc_rqphase_move(req,
1996                                                             RQ_PHASE_INTERPRET);
1997                                         spin_unlock(&imp->imp_lock);
1998                                         GOTO(interpret, req->rq_status);
1999                                 }
2000                                 /* ignore on just initiated connections */
2001                                 if (ptlrpc_no_resend(req) &&
2002                                     !req->rq_wait_ctx &&
2003                                     imp->imp_generation !=
2004                                     imp->imp_initiated_at) {
2005                                         req->rq_status = -ENOTCONN;
2006                                         ptlrpc_rqphase_move(req,
2007                                                             RQ_PHASE_INTERPRET);
2008                                         spin_unlock(&imp->imp_lock);
2009                                         GOTO(interpret, req->rq_status);
2010                                 }
2011
2012                                 list_move_tail(&req->rq_list,
2013                                                &imp->imp_sending_list);
2014
2015                                 spin_unlock(&imp->imp_lock);
2016
2017                                 spin_lock(&req->rq_lock);
2018                                 req->rq_waiting = 0;
2019                                 spin_unlock(&req->rq_lock);
2020
2021                                 if (req->rq_timedout || req->rq_resend) {
2022                                         /*
2023                                          * This is being re-sent anyway,
2024                                          * so mark the req as a resend.
2025                                          */
2026                                         spin_lock(&req->rq_lock);
2027                                         req->rq_resend = 1;
2028                                         spin_unlock(&req->rq_lock);
2029                                 }
2030                                 /*
2031                                  * rq_wait_ctx is only touched by ptlrpcd,
2032                                  * so no lock is needed here.
2033                                  */
2034                                 status = sptlrpc_req_refresh_ctx(req, -1);
2035                                 if (status) {
2036                                         if (req->rq_err) {
2037                                                 req->rq_status = status;
2038                                                 spin_lock(&req->rq_lock);
2039                                                 req->rq_wait_ctx = 0;
2040                                                 spin_unlock(&req->rq_lock);
2041                                                 force_timer_recalc = 1;
2042                                         } else {
2043                                                 spin_lock(&req->rq_lock);
2044                                                 req->rq_wait_ctx = 1;
2045                                                 spin_unlock(&req->rq_lock);
2046                                         }
2047
2048                                         continue;
2049                                 } else {
2050                                         spin_lock(&req->rq_lock);
2051                                         req->rq_wait_ctx = 0;
2052                                         spin_unlock(&req->rq_lock);
2053                                 }
2054
2055                                 /*
2056                                  * In any case, the previous bulk should be
2057                                  * cleaned up in preparation for the new send.
2058                                  */
2059                                 if (req->rq_bulk &&
2060                                     !ptlrpc_unregister_bulk(req, 1))
2061                                         continue;
2062
2063                                 rc = ptl_send_rpc(req, 0);
2064                                 if (rc == -ENOMEM) {
2065                                         spin_lock(&imp->imp_lock);
2066                                         if (!list_empty(&req->rq_list))
2067                                                 list_del_init(&req->rq_list);
2068                                         spin_unlock(&imp->imp_lock);
2069                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2070                                         continue;
2071                                 }
2072                                 if (rc) {
2073                                         DEBUG_REQ(D_HA, req,
2074                                                   "send failed: rc = %d", rc);
2075                                         force_timer_recalc = 1;
2076                                         spin_lock(&req->rq_lock);
2077                                         req->rq_net_err = 1;
2078                                         spin_unlock(&req->rq_lock);
2079                                         continue;
2080                                 }
2081                                 /* need to reset the timeout */
2082                                 force_timer_recalc = 1;
2083                         }
2084
2085                         spin_lock(&req->rq_lock);
2086
2087                         if (ptlrpc_client_early(req)) {
2088                                 ptlrpc_at_recv_early_reply(req);
2089                                 spin_unlock(&req->rq_lock);
2090                                 continue;
2091                         }
2092
2093                         /* Still waiting for a reply? */
2094                         if (ptlrpc_client_recv(req)) {
2095                                 spin_unlock(&req->rq_lock);
2096                                 continue;
2097                         }
2098
2099                         /* Did we actually receive a reply? */
2100                         if (!ptlrpc_client_replied(req)) {
2101                                 spin_unlock(&req->rq_lock);
2102                                 continue;
2103                         }
2104
2105                         spin_unlock(&req->rq_lock);
2106
2107                         /*
2108                          * unlink from the net because we are going to
2109                          * swab the reply buffer in place
2110                          */
2111                         unregistered = ptlrpc_unregister_reply(req, 1);
2112                         if (!unregistered)
2113                                 continue;
2114
2115                         req->rq_status = after_reply(req);
2116                         if (req->rq_resend)
2117                                 continue;
2118
2119                         /*
2120                          * If there is no bulk associated with this request,
2121                          * then we're done and should let the interpreter
2122                          * process the reply. Similarly if the RPC returned
2123                          * an error, and therefore the bulk will never arrive.
2124                          */
2125                         if (!req->rq_bulk || req->rq_status < 0) {
2126                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2127                                 GOTO(interpret, req->rq_status);
2128                         }
2129
2130                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2131                 }
2132
2133                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2134                 if (ptlrpc_client_bulk_active(req))
2135                         continue;
2136
2137                 if (req->rq_bulk->bd_failure) {
2138                         /*
2139                          * The RPC reply arrived OK, but the bulk screwed
2140                          * up!  Dead weird since the server told us the RPC
2141                          * was good after getting the REPLY for her GET or
2142                          * the ACK for her PUT.
2143                          */
2144                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
2145                         req->rq_status = -EIO;
2146                 }
2147
2148                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2149
2150 interpret:
2151                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2152
2153                 /*
2154                  * This moves the request to the "unregistering" phase; we need
2155                  * to wait for the reply to be unlinked.
2156                  */
2157                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2158                         /* start async bulk unlink too */
2159                         ptlrpc_unregister_bulk(req, 1);
2160                         continue;
2161                 }
2162
2163                 if (!ptlrpc_unregister_bulk(req, async))
2164                         continue;
2165
2166                 /*
2167                  * By the time interpret is called, receiving should already
2168                  * be finished.
2169                  */
2170                 LASSERT(!req->rq_receiving_reply);
2171
2172                 ptlrpc_req_interpret(env, req, req->rq_status);
2173
2174                 if (ptlrpcd_check_work(req)) {
2175                         atomic_dec(&set->set_remaining);
2176                         continue;
2177                 }
2178                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2179
2180                 if (req->rq_reqmsg)
2181                         CDEBUG(D_RPCTRACE,
2182                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2183                                req, current_comm(),
2184                                imp->imp_obd->obd_uuid.uuid,
2185                                lustre_msg_get_status(req->rq_reqmsg),
2186                                req->rq_xid,
2187                                obd_import_nid2str(imp),
2188                                lustre_msg_get_opc(req->rq_reqmsg),
2189                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2190
2191                 spin_lock(&imp->imp_lock);
2192                 /*
2193                  * The request may already be off the sending or delayed list.
2194                  * This can happen when it was marked erroneous because
2195                  * ptlrpc_import_delay_req(req, status) found it impossible to
2196                  * allow sending this RPC and returned *status != 0.
2197                  */
2198                 if (!list_empty(&req->rq_list)) {
2199                         list_del_init(&req->rq_list);
2200                         atomic_dec(&imp->imp_inflight);
2201                 }
2202                 list_del_init(&req->rq_unreplied_list);
2203                 spin_unlock(&imp->imp_lock);
2204
2205                 atomic_dec(&set->set_remaining);
2206                 wake_up_all(&imp->imp_recovery_waitq);
2207
2208                 if (set->set_producer) {
2209                         /* produce a new request if possible */
2210                         if (ptlrpc_set_producer(set) > 0)
2211                                 force_timer_recalc = 1;
2212
2213                         /*
2214                          * free the request that has just been completed
2215                          * in order not to pollute set->set_requests
2216                          */
2217                         list_del_init(&req->rq_set_chain);
2218                         spin_lock(&req->rq_lock);
2219                         req->rq_set = NULL;
2220                         req->rq_invalid_rqset = 0;
2221                         spin_unlock(&req->rq_lock);
2222
2223                         /* record rq_status to compute the final status later */
2224                         if (req->rq_status != 0)
2225                                 set->set_rc = req->rq_status;
2226                         ptlrpc_req_finished(req);
2227                 } else {
2228                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2229                 }
2230         }
2231
2232         /*
2233          * move completed requests to the head of the list so it's easier
2234          * for the caller to find them
2235          */
2236         list_splice(&comp_reqs, &set->set_requests);
2237
2238         /* If we hit an error, we want to recover promptly. */
2239         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2240 }
2241 EXPORT_SYMBOL(ptlrpc_check_set);
2242
2243 /**
2244  * Time out request \a req. If \a async_unlink is set, do not wait
2245  * until LNet actually confirms network buffer unlinking.
2246  * Return 1 if we should give up further retrying attempts or 0 otherwise.
2247  */
2248 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2249 {
2250         struct obd_import *imp = req->rq_import;
2251         unsigned int debug_mask = D_RPCTRACE;
2252         int rc = 0;
2253
2254         ENTRY;
2255         spin_lock(&req->rq_lock);
2256         req->rq_timedout = 1;
2257         spin_unlock(&req->rq_lock);
2258
2259         if (ptlrpc_console_allow(req, lustre_msg_get_opc(req->rq_reqmsg),
2260                                  lustre_msg_get_status(req->rq_reqmsg)))
2261                 debug_mask = D_WARNING;
2262         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2263                   req->rq_net_err ? "failed due to network error" :
2264                      ((req->rq_real_sent == 0 ||
2265                        req->rq_real_sent < req->rq_sent ||
2266                        req->rq_real_sent >= req->rq_deadline) ?
2267                       "timed out for sent delay" : "timed out for slow reply"),
2268                   (s64)req->rq_sent, (s64)req->rq_real_sent);
2269
2270         if (imp && obd_debug_peer_on_timeout)
2271                 LNetDebugPeer(imp->imp_connection->c_peer);
2272
2273         ptlrpc_unregister_reply(req, async_unlink);
2274         ptlrpc_unregister_bulk(req, async_unlink);
2275
2276         if (obd_dump_on_timeout)
2277                 libcfs_debug_dumplog();
2278
2279         if (!imp) {
2280                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2281                 RETURN(1);
2282         }
2283
2284         atomic_inc(&imp->imp_timeouts);
2285
2286         /* The DLM server doesn't want recovery run on its imports. */
2287         if (imp->imp_dlm_fake)
2288                 RETURN(1);
2289
2290         /*
2291          * If this request is for recovery or other primordial tasks,
2292          * then error it out here.
2293          */
2294         if (req->rq_ctx_init || req->rq_ctx_fini ||
2295             req->rq_send_state != LUSTRE_IMP_FULL ||
2296             imp->imp_obd->obd_no_recov) {
2297                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2298                           ptlrpc_import_state_name(req->rq_send_state),
2299                           ptlrpc_import_state_name(imp->imp_state));
2300                 spin_lock(&req->rq_lock);
2301                 req->rq_status = -ETIMEDOUT;
2302                 req->rq_err = 1;
2303                 spin_unlock(&req->rq_lock);
2304                 RETURN(1);
2305         }
2306
2307         /*
2308          * if a request can't be resent we can't wait for an answer after
2309          * the timeout
2310          */
2311         if (ptlrpc_no_resend(req)) {
2312                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2313                 rc = 1;
2314         }
2315
2316         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
2317
2318         RETURN(rc);
2319 }
2320
2321 /**
2322  * Time out all uncompleted requests in the request set pointed to by \a data.
2323  * Callback used when waiting on sets with l_wait_event.
2324  * Always returns 1.
2325  */
2326 int ptlrpc_expired_set(void *data)
2327 {
2328         struct ptlrpc_request_set *set = data;
2329         struct list_head *tmp;
2330         time64_t now = ktime_get_real_seconds();
2331
2332         ENTRY;
2333         LASSERT(set != NULL);
2334
2335         /*
2336          * A timeout expired. See which reqs it applies to...
2337          */
2338         list_for_each(tmp, &set->set_requests) {
2339                 struct ptlrpc_request *req =
2340                         list_entry(tmp, struct ptlrpc_request,
2341                                    rq_set_chain);
2342
2343                 /* don't expire request waiting for context */
2344                 if (req->rq_wait_ctx)
2345                         continue;
2346
2347                 /* Request in-flight? */
2348                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2349                        !req->rq_waiting && !req->rq_resend) ||
2350                       (req->rq_phase == RQ_PHASE_BULK)))
2351                         continue;
2352
2353                 if (req->rq_timedout ||     /* already dealt with */
2354                     req->rq_deadline > now) /* not expired */
2355                         continue;
2356
2357                 /*
2358                  * Deal with this guy. Do it asynchronously to not block
2359                  * ptlrpcd thread.
2360                  */
2361                 ptlrpc_expire_one_request(req, 1);
2362         }
2363
2364         /*
2365          * When waiting for a whole set, we always break out of the
2366          * sleep so we can recalculate the timeout, or enable interrupts
2367          * if everyone's timed out.
2368          */
2369         RETURN(1);
2370 }
2371
2372 /**
2373  * Sets rq_intr flag in \a req under spinlock.
2374  */
2375 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
2376 {
2377         spin_lock(&req->rq_lock);
2378         req->rq_intr = 1;
2379         spin_unlock(&req->rq_lock);
2380 }
2381 EXPORT_SYMBOL(ptlrpc_mark_interrupted);
2382
2383 /**
2384  * Interrupts (sets interrupted flag) all uncompleted requests in
2385  * a set \a data. Callback for l_wait_event for interruptible waits.
2386  */
2387 static void ptlrpc_interrupted_set(void *data)
2388 {
2389         struct ptlrpc_request_set *set = data;
2390         struct list_head *tmp;
2391
2392         LASSERT(set != NULL);
2393         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2394
2395         list_for_each(tmp, &set->set_requests) {
2396                 struct ptlrpc_request *req =
2397                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2398
2399                 if (req->rq_intr)
2400                         continue;
2401
2402                 if (req->rq_phase != RQ_PHASE_RPC &&
2403                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2404                     !req->rq_allow_intr)
2405                         continue;
2406
2407                 ptlrpc_mark_interrupted(req);
2408         }
2409 }
2410
2411 /**
2412  * Get the smallest timeout in the set; this does NOT set a timeout.
2413  */
2414 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2415 {
2416         struct list_head *tmp;
2417         time64_t now = ktime_get_real_seconds();
2418         int timeout = 0;
2419         struct ptlrpc_request *req;
2420         time64_t deadline;
2421
2422         ENTRY;
2423         list_for_each(tmp, &set->set_requests) {
2424                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2425
2426                 /* Request in-flight? */
2427                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2428                       (req->rq_phase == RQ_PHASE_BULK) ||
2429                       (req->rq_phase == RQ_PHASE_NEW)))
2430                         continue;
2431
2432                 /* Already timed out. */
2433                 if (req->rq_timedout)
2434                         continue;
2435
2436                 /* Waiting for ctx. */
2437                 if (req->rq_wait_ctx)
2438                         continue;
2439
2440                 if (req->rq_phase == RQ_PHASE_NEW)
2441                         deadline = req->rq_sent;
2442                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2443                         deadline = req->rq_sent;
2444                 else
2445                         deadline = req->rq_sent + req->rq_timeout;
2446
2447                 if (deadline <= now)    /* actually expired already */
2448                         timeout = 1;    /* ASAP */
2449                 else if (timeout == 0 || timeout > deadline - now)
2450                         timeout = deadline - now;
2451         }
2452         RETURN(timeout);
2453 }
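
/*
 * Illustrative example (not from the original source): if the set holds two
 * in-flight requests whose deadlines are 3s and 7s away, this returns 3; if
 * any tracked deadline has already passed, it returns 1 so the caller
 * re-checks the set as soon as possible.
 */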
2454
2455 /**
2456  * Send all unsent requests from the set and then wait until all
2457  * requests in the set complete (either get a reply, timeout, get an
2458  * error or otherwise be interrupted).
2459  * Returns 0 on success or error code otherwise.
2460  */
2461 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2462 {
2463         struct list_head *tmp;
2464         struct ptlrpc_request *req;
2465         struct l_wait_info lwi;
2466         time64_t timeout;
2467         int rc;
2468
2469         ENTRY;
2470         if (set->set_producer)
2471                 (void)ptlrpc_set_producer(set);
2472         else
2473                 list_for_each(tmp, &set->set_requests) {
2474                         req = list_entry(tmp, struct ptlrpc_request,
2475                                          rq_set_chain);
2476                         if (req->rq_phase == RQ_PHASE_NEW)
2477                                 (void)ptlrpc_send_new_req(req);
2478                 }
2479
2480         if (list_empty(&set->set_requests))
2481                 RETURN(0);
2482
2483         do {
2484                 timeout = ptlrpc_set_next_timeout(set);
2485
2486                 /*
2487                  * wait until all requests complete or are interrupted, or an
2488                  * in-flight req times out
2489                  */
2490                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2491                        set, timeout);
2492
2493                 if ((timeout == 0 && !signal_pending(current)) ||
2494                     set->set_allow_intr) {
2495                         /*
2496                          * No requests are in-flight (either timed out
2497                          * or delayed), so we can allow interrupts.
2498                          * We still want to block for a limited time,
2499                          * so we allow interrupts during the timeout.
2500                          */
2501                         lwi = LWI_TIMEOUT_INTR_ALL(
2502                                         cfs_time_seconds(timeout ? timeout : 1),
2503                                         ptlrpc_expired_set,
2504                                         ptlrpc_interrupted_set, set);
2505
2506                         rc = l_wait_event(set->set_waitq,
2507                                           ptlrpc_check_set(NULL, set), &lwi);
2508                 } else {
2509                         /*
2510                          * At least one request is in flight, so no
2511                          * interrupts are allowed. Wait until all
2512                          * complete, or an in-flight req times out.
2513                          */
2514                         lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
2515                                           ptlrpc_expired_set, set);
2516
2517                         rc = l_wait_event(set->set_waitq,
2518                                           ptlrpc_check_set(NULL, set), &lwi);
2519
2520                         /*
2521                          * LU-769 - if we ignored the signal because
2522                          * it was already pending when we started, we
2523                          * need to handle it now or we risk it being
2524                          * ignored forever
2525                          */
2526                         if (rc == -ETIMEDOUT &&
2527                             signal_pending(current)) {
2528                                 sigset_t blocked_sigs =
2529                                         cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
2530
2531                                 /*
2532                                  * In fact we only interrupt for the
2533                                  * "fatal" signals like SIGINT or
2534                                  * SIGKILL. We still ignore less
2535                                  * important signals since the ptlrpc set
2536                                  * cannot easily be re-entered from
2537                                  * userspace
2538                                  */
2539                                 if (signal_pending(current))
2540                                         ptlrpc_interrupted_set(set);
2541                                 cfs_restore_sigs(blocked_sigs);
2542                         }
2543                 }
2544
2545                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2546
2547                 /*
2548                  * -EINTR => all requests have been flagged rq_intr so next
2549                  * check completes.
2550                  * -ETIMEDOUT => someone timed out.  When all reqs have
2551                  * timed out, signals are enabled allowing completion with
2552                  * EINTR.
2553                  * I don't really care if we go once more round the loop in
2554                  * the error cases -eeb.
2555                  */
2556                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2557                         list_for_each(tmp, &set->set_requests) {
2558                                 req = list_entry(tmp, struct ptlrpc_request,
2559                                                  rq_set_chain);
2560                                 spin_lock(&req->rq_lock);
2561                                 req->rq_invalid_rqset = 1;
2562                                 spin_unlock(&req->rq_lock);
2563                         }
2564                 }
2565         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2566
2567         LASSERT(atomic_read(&set->set_remaining) == 0);
2568
2569         rc = set->set_rc; /* rq_status of already freed requests if any */
2570         list_for_each(tmp, &set->set_requests) {
2571                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2572
2573                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2574                 if (req->rq_status != 0)
2575                         rc = req->rq_status;
2576         }
2577
2578         RETURN(rc);
2579 }
2580 EXPORT_SYMBOL(ptlrpc_set_wait);
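
/*
 * Illustrative usage sketch (not part of the original source; "reqs" and
 * "nreqs" are hypothetical and error handling is trimmed).  A typical caller
 * builds a set, adds prepared requests, and lets ptlrpc_set_wait() send them
 * and wait for completion:
 *
 *	struct ptlrpc_request_set *set;
 *	int i, rc;
 *
 *	set = ptlrpc_prep_set();
 *	if (set == NULL)
 *		return -ENOMEM;
 *	for (i = 0; i < nreqs; i++)
 *		ptlrpc_set_add_req(set, reqs[i]);
 *	rc = ptlrpc_set_wait(env, set);	// send and wait for all replies
 *	ptlrpc_set_destroy(set);	// drops the set's request references
 *	return rc;
 */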
2581
2582 /**
2583  * Helper function for request freeing.
2584  * Called when the request reference count reaches zero and the request
2585  * needs to be freed. Removes the request from all the sending/replay
2586  * lists it might be on and frees network buffers if any are present.
2587  * If \a locked is set, the caller is already holding the import imp_lock,
2588  * so we do not need to reacquire it (for certain list manipulations).
2589  */
2590 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2591 {
2592         ENTRY;
2593
2594         if (!request)
2595                 RETURN_EXIT;
2596
2597         LASSERT(!request->rq_srv_req);
2598         LASSERT(request->rq_export == NULL);
2599         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2600         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
2601         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
2602         LASSERTF(!request->rq_replay, "req %p\n", request);
2603
2604         req_capsule_fini(&request->rq_pill);
2605
2606         /*
2607          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2608          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2609          */
2610         if (request->rq_import) {
2611                 if (!locked)
2612                         spin_lock(&request->rq_import->imp_lock);
2613                 list_del_init(&request->rq_replay_list);
2614                 list_del_init(&request->rq_unreplied_list);
2615                 if (!locked)
2616                         spin_unlock(&request->rq_import->imp_lock);
2617         }
2618         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
2619
2620         if (atomic_read(&request->rq_refcount) != 0) {
2621                 DEBUG_REQ(D_ERROR, request,
2622                           "freeing request with nonzero refcount");
2623                 LBUG();
2624         }
2625
2626         if (request->rq_repbuf)
2627                 sptlrpc_cli_free_repbuf(request);
2628
2629         if (request->rq_import) {
2630                 class_import_put(request->rq_import);
2631                 request->rq_import = NULL;
2632         }
2633         if (request->rq_bulk)
2634                 ptlrpc_free_bulk(request->rq_bulk);
2635
2636         if (request->rq_reqbuf || request->rq_clrbuf)
2637                 sptlrpc_cli_free_reqbuf(request);
2638
2639         if (request->rq_cli_ctx)
2640                 sptlrpc_req_put_ctx(request, !locked);
2641
2642         if (request->rq_pool)
2643                 __ptlrpc_free_req_to_pool(request);
2644         else
2645                 ptlrpc_request_cache_free(request);
2646         EXIT;
2647 }
2648
2649 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2650 /**
2651  * Drop one request reference. Must be called with import imp_lock held.
2652  * When reference count drops to zero, request is freed.
2653  */
2654 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2655 {
2656         assert_spin_locked(&request->rq_import->imp_lock);
2657         (void)__ptlrpc_req_finished(request, 1);
2658 }
2659
2660 /**
2661  * Helper function.
2662  * Drops one reference count for request \a request.
2663  * \a locked set indicates that the caller holds the import imp_lock.
2664  * Frees the request when the reference count reaches zero.
2665  *
2666  * \retval 1    the request is freed
2667  * \retval 0    some others still hold references on the request
2668  */
2669 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2670 {
2671         int count;
2672
2673         ENTRY;
2674         if (!request)
2675                 RETURN(1);
2676
2677         LASSERT(request != LP_POISON);
2678         LASSERT(request->rq_reqmsg != LP_POISON);
2679
2680         DEBUG_REQ(D_INFO, request, "refcount now %u",
2681                   atomic_read(&request->rq_refcount) - 1);
2682
2683         spin_lock(&request->rq_lock);
2684         count = atomic_dec_return(&request->rq_refcount);
2685         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2686
2687         /*
2688          * For an open RPC, the client does not know the EA size (LOV, ACL,
2689          * and so on) before the reply arrives, so it has to reserve a very
2690          * large reply buffer. Such a buffer will not be released until the
2691          * RPC is freed. Since the open RPC is replayable, we need to keep it
2692          * on the replay list until close. If a lot of files are opened
2693          * concurrently, the client may run out of memory.
2694          *
2695          * In fact, it is unnecessary to keep the reply buffer for open replay;
2696          * the related EAs have already been saved via mdc_save_lovea() before
2697          * coming here. So it is safe to free the reply buffer a bit earlier,
2698          * before releasing the RPC, to avoid client OOM. LU-9514
2699          */
2700         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2701                 spin_lock(&request->rq_early_free_lock);
2702                 sptlrpc_cli_free_repbuf(request);
2703                 request->rq_repbuf = NULL;
2704                 request->rq_repbuf_len = 0;
2705                 request->rq_repdata = NULL;
2706                 request->rq_reqdata_len = 0;
2707                 spin_unlock(&request->rq_early_free_lock);
2708         }
2709         spin_unlock(&request->rq_lock);
2710
2711         if (!count)
2712                 __ptlrpc_free_req(request, locked);
2713
2714         RETURN(!count);
2715 }
2716
2717 /**
2718  * Drops one reference count for a request.
2719  */
2720 void ptlrpc_req_finished(struct ptlrpc_request *request)
2721 {
2722         __ptlrpc_req_finished(request, 0);
2723 }
2724 EXPORT_SYMBOL(ptlrpc_req_finished);
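
/*
 * Illustrative sketch (not part of the original source; error handling is
 * trimmed).  A typical synchronous caller holds the initial reference from
 * the allocation helpers and drops it with ptlrpc_req_finished() once it is
 * done with the reply, e.g. for a ping-style request:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);	// send and wait for the reply
 *	ptlrpc_req_finished(req);	// drop the last reference, freeing req
 */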
2725
2726 /**
2727  * Returns the XID of \a request.
2728  */
2729 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2730 {
2731         return request->rq_xid;
2732 }
2733 EXPORT_SYMBOL(ptlrpc_req_xid);
2734
2735 /**
2736  * Disengage the client's reply buffer from the network
2737  * NB does _NOT_ unregister any client-side bulk.
2738  * IDEMPOTENT, but _not_ safe against concurrent callers.
2739  * The request owner (i.e. the thread doing the I/O) must call...
2740  * Returns 1 once the reply is unlinked, or 0 if async unlinking is still pending.
2741  */
2742 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2743 {
2744         int rc;
2745         struct l_wait_info lwi;
2746
2747         /*
2748          * Might sleep.
2749          */
2750         LASSERT(!in_interrupt());
2751
2752         /* Let's set up a deadline for reply unlink. */
2753         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2754             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2755                 request->rq_reply_deadline = ktime_get_real_seconds() +
2756                                              LONG_UNLINK;
2757
2758         /*
2759          * Nothing left to do.
2760          */
2761         if (!ptlrpc_client_recv_or_unlink(request))
2762                 RETURN(1);
2763
2764         LNetMDUnlink(request->rq_reply_md_h);
2765
2766         /*
2767          * Let's check it once again.
2768          */
2769         if (!ptlrpc_client_recv_or_unlink(request))
2770                 RETURN(1);
2771
2772         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2773         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2774
2775         /*
2776          * Do not wait for unlink to finish.
2777          */
2778         if (async)
2779                 RETURN(0);
2780
2781         /*
2782          * We have to l_wait_event() whatever the result, to give liblustre
2783          * a chance to run reply_in_callback(), and to make sure we've
2784          * unlinked before returning a req to the pool.
2785          */
2786         for (;;) {
2787                 /* The wq argument is ignored by user-space wait_event macros */
2788                 wait_queue_head_t *wq = (request->rq_set) ?
2789                                         &request->rq_set->set_waitq :
2790                                         &request->rq_reply_waitq;
2791                 /*
2792                  * Network access will complete in finite time but the HUGE
2793                  * timeout lets us CWARN for visibility of sluggish NALs
2794                  */
2795                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2796                                            cfs_time_seconds(1), NULL, NULL);
2797                 rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
2798                                   &lwi);
2799                 if (rc == 0) {
2800                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2801                         RETURN(1);
2802                 }
2803
2804                 LASSERT(rc == -ETIMEDOUT);
2805                 DEBUG_REQ(D_WARNING, request,
2806                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2807                           request->rq_receiving_reply,
2808                           request->rq_req_unlinked,
2809                           request->rq_reply_unlinked);
2810         }
2811         RETURN(0);
2812 }
2813
2814 static void ptlrpc_free_request(struct ptlrpc_request *req)
2815 {
2816         spin_lock(&req->rq_lock);
2817         req->rq_replay = 0;
2818         spin_unlock(&req->rq_lock);
2819
2820         if (req->rq_commit_cb)
2821                 req->rq_commit_cb(req);
2822         list_del_init(&req->rq_replay_list);
2823
2824         __ptlrpc_req_finished(req, 1);
2825 }
2826
2827 /**
2828  * the request is committed and dropped from the replay list of its import
2829  */
2830 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2831 {
2832         struct obd_import *imp = req->rq_import;
2833
2834         spin_lock(&imp->imp_lock);
2835         if (list_empty(&req->rq_replay_list)) {
2836                 spin_unlock(&imp->imp_lock);
2837                 return;
2838         }
2839
2840         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2841                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2842                         imp->imp_replay_cursor = req->rq_replay_list.next;
2843                 ptlrpc_free_request(req);
2844         }
2845
2846         spin_unlock(&imp->imp_lock);
2847 }
2848 EXPORT_SYMBOL(ptlrpc_request_committed);
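
/*
 * Example (illustrative sketch only; the caller context below is hypothetical
 * and not taken from this file): once a caller learns that a request's
 * transaction is stable on the server, it can drop the request from the
 * import's replay list:
 *
 *	ptlrpc_request_committed(req, 0);	// free only if rq_transno is
 *						// <= imp_peer_committed_transno
 *	ptlrpc_request_committed(req, 1);	// force: drop unconditionally
 *
 * The function takes imp_lock itself, so it must not be called with that
 * lock already held.
 */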
2849
2850 /**
2851  * Iterates through the replay_list on the import and prunes
2852  * all requests that have a transno smaller than last_committed for the
2853  * import and do not have rq_replay set.
2854  * Since requests are sorted in transno order, stops when meeting the first
2855  * transno bigger than last_committed.
2856  * Caller must hold imp->imp_lock.
2857  */
2858 void ptlrpc_free_committed(struct obd_import *imp)
2859 {
2860         struct ptlrpc_request *req, *saved;
2861         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2862         bool skip_committed_list = true;
2863
2864         ENTRY;
2865         LASSERT(imp != NULL);
2866         assert_spin_locked(&imp->imp_lock);
2867
2868         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2869             imp->imp_generation == imp->imp_last_generation_checked) {
2870                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2871                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2872                 RETURN_EXIT;
2873         }
2874         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2875                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2876                imp->imp_generation);
2877
2878         if (imp->imp_generation != imp->imp_last_generation_checked ||
2879             imp->imp_last_transno_checked == 0)
2880                 skip_committed_list = false;
2881
2882         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2883         imp->imp_last_generation_checked = imp->imp_generation;
2884
2885         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2886                                  rq_replay_list) {
2887                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2888                 LASSERT(req != last_req);
2889                 last_req = req;
2890
2891                 if (req->rq_transno == 0) {
2892                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2893                         LBUG();
2894                 }
2895                 if (req->rq_import_generation < imp->imp_generation) {
2896                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2897                         GOTO(free_req, 0);
2898                 }
2899
2900                 /* not yet committed */
2901                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2902                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2903                         break;
2904                 }
2905
2906                 if (req->rq_replay) {
2907                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2908                         list_move_tail(&req->rq_replay_list,
2909                                        &imp->imp_committed_list);
2910                         continue;
2911                 }
2912
2913                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
2914                           imp->imp_peer_committed_transno);
2915 free_req:
2916                 ptlrpc_free_request(req);
2917         }
2918
2919         if (skip_committed_list)
2920                 GOTO(out, 0);
2921
2922         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2923                                  rq_replay_list) {
2924                 LASSERT(req->rq_transno != 0);
2925                 if (req->rq_import_generation < imp->imp_generation ||
2926                     !req->rq_replay) {
2927                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
2928                                   req->rq_import_generation <
2929                                   imp->imp_generation ? "stale" : "closed");
2930
2931                         if (imp->imp_replay_cursor == &req->rq_replay_list)
2932                                 imp->imp_replay_cursor =
2933                                         req->rq_replay_list.next;
2934
2935                         ptlrpc_free_request(req);
2936                 }
2937         }
2938 out:
2939         EXIT;
2940 }
2941
2942 void ptlrpc_cleanup_client(struct obd_import *imp)
2943 {
2944         ENTRY;
2945         EXIT;
2946 }
2947
2948 /**
2949  * Schedule a previously sent request for resend.
2950  * For bulk requests we assign a new xid (to avoid problems with
2951  * lost replies and therefore several transfers landing in the same buffer
2952  * from different sending attempts).
2953  */
2954 void ptlrpc_resend_req(struct ptlrpc_request *req)
2955 {
2956         DEBUG_REQ(D_HA, req, "going to resend");
2957         spin_lock(&req->rq_lock);
2958
2959         /*
2960          * The request got a reply but is still linked to the import list.
2961          * Let ptlrpc_check_set() process it.
2962          */
2963         if (ptlrpc_client_replied(req)) {
2964                 spin_unlock(&req->rq_lock);
2965                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2966                 return;
2967         }
2968
2969         req->rq_status = -EAGAIN;
2970
2971         req->rq_resend = 1;
2972         req->rq_net_err = 0;
2973         req->rq_timedout = 0;
2974
2975         ptlrpc_client_wake_req(req);
2976         spin_unlock(&req->rq_lock);
2977 }
2978
2979 /* XXX: this function and rq_status are currently unused */
2980 void ptlrpc_restart_req(struct ptlrpc_request *req)
2981 {
2982         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2983         req->rq_status = -ERESTARTSYS;
2984
2985         spin_lock(&req->rq_lock);
2986         req->rq_restart = 1;
2987         req->rq_timedout = 0;
2988         ptlrpc_client_wake_req(req);
2989         spin_unlock(&req->rq_lock);
2990 }
2991
2992 /**
2993  * Grab an additional reference on request \a req
2994  */
2995 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
2996 {
2997         ENTRY;
2998         atomic_inc(&req->rq_refcount);
2999         RETURN(req);
3000 }
3001 EXPORT_SYMBOL(ptlrpc_request_addref);
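
/*
 * Example (minimal sketch, hypothetical caller): references taken with
 * ptlrpc_request_addref() are balanced by ptlrpc_req_finished(); the request
 * is only freed when the last reference is dropped:
 *
 *	ptlrpc_request_addref(req);	// keep req alive across async use
 *	...hand req to another context (e.g. a request set or ptlrpcd)...
 *	ptlrpc_req_finished(req);	// balances the addref above
 */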
3002
3003 /**
3004  * Add a request to the import's replay_list.
3005  * Must be called under imp_lock.
3006  */
3007 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3008                                       struct obd_import *imp)
3009 {
3010         struct list_head *tmp;
3011
3012         assert_spin_locked(&imp->imp_lock);
3013
3014         if (req->rq_transno == 0) {
3015                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3016                 LBUG();
3017         }
3018
3019         /*
3020          * clear this for new requests that were resent as well
3021          * as resent replayed requests.
3022          */
3023         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3024
3025         /* don't re-add requests that have been replayed */
3026         if (!list_empty(&req->rq_replay_list))
3027                 return;
3028
3029         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3030
3031         spin_lock(&req->rq_lock);
3032         req->rq_resend = 0;
3033         spin_unlock(&req->rq_lock);
3034
3035         LASSERT(imp->imp_replayable);
3036         /* Balanced in ptlrpc_free_committed, usually. */
3037         ptlrpc_request_addref(req);
3038         list_for_each_prev(tmp, &imp->imp_replay_list) {
3039                 struct ptlrpc_request *iter = list_entry(tmp,
3040                                                          struct ptlrpc_request,
3041                                                          rq_replay_list);
3042
3043                 /*
3044                  * We may have duplicate transnos if we create and then
3045                  * open a file, or for closes retained to match creating
3046                  * opens, so use req->rq_xid as a secondary key.
3047                  * (See bugs 684, 685, and 428.)
3048                  * XXX no longer needed, but all opens need transnos!
3049                  */
3050                 if (iter->rq_transno > req->rq_transno)
3051                         continue;
3052
3053                 if (iter->rq_transno == req->rq_transno) {
3054                         LASSERT(iter->rq_xid != req->rq_xid);
3055                         if (iter->rq_xid > req->rq_xid)
3056                                 continue;
3057                 }
3058
3059                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3060                 return;
3061         }
3062
3063         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3064 }
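
/*
 * Worked example (sketch with hypothetical values): if the replay list holds
 * requests with (transno, xid) pairs (10, 3), (12, 5), (12, 7), (15, 9), a new
 * request with transno 12 and xid 6 is inserted after (12, 5) and before
 * (12, 7), keeping the list sorted by transno first and xid second.
 */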
3065
3066 /**
3067  * Send request and wait until it completes.
3068  * Returns request processing status.
3069  */
3070 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3071 {
3072         struct ptlrpc_request_set *set;
3073         int rc;
3074
3075         ENTRY;
3076         LASSERT(req->rq_set == NULL);
3077         LASSERT(!req->rq_receiving_reply);
3078
3079         set = ptlrpc_prep_set();
3080         if (!set) {
3081                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3082                 RETURN(-ENOMEM);
3083         }
3084
3085         /* for distributed debugging */
3086         lustre_msg_set_status(req->rq_reqmsg, current_pid());
3087
3088         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3089         ptlrpc_request_addref(req);
3090         ptlrpc_set_add_req(set, req);
3091         rc = ptlrpc_set_wait(NULL, set);
3092         ptlrpc_set_destroy(set);
3093
3094         RETURN(rc);
3095 }
3096 EXPORT_SYMBOL(ptlrpc_queue_wait);
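
/*
 * Example (minimal sketch of a synchronous caller; the OBD_PING request shown
 * is an assumption for illustration, not taken from this file):
 *
 *	struct ptlrpc_request *req;
 *	int rc;
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);	// send and wait for completion
 *	ptlrpc_req_finished(req);	// drop our reference in all cases
 *	return rc;
 */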
3097
3098 /**
3099  * Callback used for processing replies to replayed requests.
3100  * In case of a successful reply, calls the registered request replay callback.
3101  * In case of error, restarts the replay process.
3102  */
3103 static int ptlrpc_replay_interpret(const struct lu_env *env,
3104                                    struct ptlrpc_request *req,
3105                                    void *args, int rc)
3106 {
3107         struct ptlrpc_replay_async_args *aa = args;
3108         struct obd_import *imp = req->rq_import;
3109
3110         ENTRY;
3111         atomic_dec(&imp->imp_replay_inflight);
3112
3113         /*
3114          * Note: if it is bulk replay (MDS-MDS replay), then even if the
3115          * server got the request but the bulk transfer timed out, let's
3116          * replay the bulk req again.
3117          */
3118         if (!ptlrpc_client_replied(req) ||
3119             (req->rq_bulk &&
3120              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3121                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3122                 GOTO(out, rc = -ETIMEDOUT);
3123         }
3124
3125         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3126             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3127             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3128                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3129
3130         /** VBR: check version failure */
3131         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3132                 /** replay failed due to a version mismatch */
3133                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3134                 spin_lock(&imp->imp_lock);
3135                 imp->imp_vbr_failed = 1;
3136                 spin_unlock(&imp->imp_lock);
3137                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3138         } else {
3139                 /** The transno had better not change over replay. */
3140                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3141                          lustre_msg_get_transno(req->rq_repmsg) ||
3142                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3143                          "%#llx/%#llx\n",
3144                          lustre_msg_get_transno(req->rq_reqmsg),
3145                          lustre_msg_get_transno(req->rq_repmsg));
3146         }
3147
3148         spin_lock(&imp->imp_lock);
3149         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3150         spin_unlock(&imp->imp_lock);
3151         LASSERT(imp->imp_last_replay_transno);
3152
3153         /* transaction number shouldn't be bigger than the latest replayed */
3154         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3155                 DEBUG_REQ(D_ERROR, req,
3156                           "Reported transno=%llu is bigger than replayed=%llu",
3157                           req->rq_transno,
3158                           lustre_msg_get_transno(req->rq_reqmsg));
3159                 GOTO(out, rc = -EINVAL);
3160         }
3161
3162         DEBUG_REQ(D_HA, req, "got reply");
3163
3164         /* let the callback do fixups, possibly including fixups in the request itself */
3165         if (req->rq_replay_cb)
3166                 req->rq_replay_cb(req);
3167
3168         if (ptlrpc_client_replied(req) &&
3169             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3170                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3171                           lustre_msg_get_status(req->rq_repmsg),
3172                           aa->praa_old_status);
3173
3174                 /*
3175                  * Note: If the replay fails for MDT-MDT recovery, let's
3176                  * abort all of the following requests in the replay
3177                  * and sending list, because MDT-MDT update requests
3178                  * are dependent on each other, see LU-7039
3179                  */
3180                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3181                         struct ptlrpc_request *free_req;
3182                         struct ptlrpc_request *tmp;
3183
3184                         spin_lock(&imp->imp_lock);
3185                         list_for_each_entry_safe(free_req, tmp,
3186                                                  &imp->imp_replay_list,
3187                                                  rq_replay_list) {
3188                                 ptlrpc_free_request(free_req);
3189                         }
3190
3191                         list_for_each_entry_safe(free_req, tmp,
3192                                                  &imp->imp_committed_list,
3193                                                  rq_replay_list) {
3194                                 ptlrpc_free_request(free_req);
3195                         }
3196
3197                         list_for_each_entry_safe(free_req, tmp,
3198                                                  &imp->imp_delayed_list,
3199                                                  rq_list) {
3200                                 spin_lock(&free_req->rq_lock);
3201                                 free_req->rq_err = 1;
3202                                 free_req->rq_status = -EIO;
3203                                 ptlrpc_client_wake_req(free_req);
3204                                 spin_unlock(&free_req->rq_lock);
3205                         }
3206
3207                         list_for_each_entry_safe(free_req, tmp,
3208                                                  &imp->imp_sending_list,
3209                                                  rq_list) {
3210                                 spin_lock(&free_req->rq_lock);
3211                                 free_req->rq_err = 1;
3212                                 free_req->rq_status = -EIO;
3213                                 ptlrpc_client_wake_req(free_req);
3214                                 spin_unlock(&free_req->rq_lock);
3215                         }
3216                         spin_unlock(&imp->imp_lock);
3217                 }
3218         } else {
3219                 /* Put it back for re-replay. */
3220                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3221         }
3222
3223         /*
3224          * Errors during replay can set transno to 0, but
3225          * imp_last_replay_transno shouldn't be set to 0 anyway.
3226          */
3227         if (req->rq_transno == 0)
3228                 CERROR("Transno is 0 during replay!\n");
3229
3230         /* continue with recovery */
3231         rc = ptlrpc_import_recovery_state_machine(imp);
3232  out:
3233         req->rq_send_state = aa->praa_old_state;
3234
3235         if (rc != 0)
3236                 /* this replay failed, so restart recovery */
3237                 ptlrpc_connect_import(imp);
3238
3239         RETURN(rc);
3240 }
3241
3242 /**
3243  * Prepares and queues request for replay.
3244  * Adds it to ptlrpcd queue for actual sending.
3245  * Returns 0 on success.
3246  */
3247 int ptlrpc_replay_req(struct ptlrpc_request *req)
3248 {
3249         struct ptlrpc_replay_async_args *aa;
3250
3251         ENTRY;
3252
3253         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3254
3255         aa = ptlrpc_req_async_args(aa, req);
3256         memset(aa, 0, sizeof(*aa));
3257
3258         /* Prepare request to be resent with ptlrpcd */
3259         aa->praa_old_state = req->rq_send_state;
3260         req->rq_send_state = LUSTRE_IMP_REPLAY;
3261         req->rq_phase = RQ_PHASE_NEW;
3262         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3263         if (req->rq_repmsg)
3264                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3265         req->rq_status = 0;
3266         req->rq_interpret_reply = ptlrpc_replay_interpret;
3267         /* Readjust the timeout for current conditions */
3268         ptlrpc_at_set_req_timeout(req);
3269
3270         /* Tell the server the net_latency, so it can calculate how long to wait for the reply. */
3271         lustre_msg_set_service_time(req->rq_reqmsg,
3272                                     ptlrpc_at_get_net_latency(req));
3273         DEBUG_REQ(D_HA, req, "REPLAY");
3274
3275         atomic_inc(&req->rq_import->imp_replay_inflight);
3276         spin_lock(&req->rq_lock);
3277         req->rq_early_free_repbuf = 0;
3278         spin_unlock(&req->rq_lock);
3279         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3280
3281         ptlrpcd_add_req(req);
3282         RETURN(0);
3283 }
3284
3285 /**
3286  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
3287  */
3288 void ptlrpc_abort_inflight(struct obd_import *imp)
3289 {
3290         struct list_head *tmp, *n;
3291         ENTRY;
3292
3293         /*
3294          * Make sure that no new requests get processed for this import.
3295          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3296          * this flag and then putting requests on sending_list or delayed_list.
3297          */
3298         assert_spin_locked(&imp->imp_lock);
3299
3300         /*
3301          * XXX locking?  Maybe we should remove each request with the list
3302          * locked?  Also, how do we know if the requests on the list are
3303          * being freed at this time?
3304          */
3305         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
3306                 struct ptlrpc_request *req = list_entry(tmp,
3307                                                         struct ptlrpc_request,
3308                                                         rq_list);
3309
3310                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3311
3312                 spin_lock(&req->rq_lock);
3313                 if (req->rq_import_generation < imp->imp_generation) {
3314                         req->rq_err = 1;
3315                         req->rq_status = -EIO;
3316                         ptlrpc_client_wake_req(req);
3317                 }
3318                 spin_unlock(&req->rq_lock);
3319         }
3320
3321         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
3322                 struct ptlrpc_request *req =
3323                         list_entry(tmp, struct ptlrpc_request, rq_list);
3324
3325                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3326
3327                 spin_lock(&req->rq_lock);
3328                 if (req->rq_import_generation < imp->imp_generation) {
3329                         req->rq_err = 1;
3330                         req->rq_status = -EIO;
3331                         ptlrpc_client_wake_req(req);
3332                 }
3333                 spin_unlock(&req->rq_lock);
3334         }
3335
3336         /*
3337          * Last chance to free reqs left on the replay list, but we
3338          * will still leak reqs that haven't committed.
3339          */
3340         if (imp->imp_replayable)
3341                 ptlrpc_free_committed(imp);
3342
3343         EXIT;
3344 }
3345
3346 /**
3347  * Abort all uncompleted requests in request set \a set
3348  */
3349 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3350 {
3351         struct list_head *tmp, *pos;
3352
3353         LASSERT(set != NULL);
3354
3355         list_for_each_safe(pos, tmp, &set->set_requests) {
3356                 struct ptlrpc_request *req =
3357                         list_entry(pos, struct ptlrpc_request,
3358                                    rq_set_chain);
3359
3360                 spin_lock(&req->rq_lock);
3361                 if (req->rq_phase != RQ_PHASE_RPC) {
3362                         spin_unlock(&req->rq_lock);
3363                         continue;
3364                 }
3365
3366                 req->rq_err = 1;
3367                 req->rq_status = -EINTR;
3368                 ptlrpc_client_wake_req(req);
3369                 spin_unlock(&req->rq_lock);
3370         }
3371 }
3372
3373 /**
3374  * Initialize the XID for the node.  This is common among all requests on
3375  * this node, and only requires the property that it is monotonically
3376  * increasing.  It does not need to be sequential.  Since this is also used
3377  * as the RDMA match bits, it is important that a single client NOT have
3378  * the same match bits for two different in-flight requests, hence we do
3379  * NOT want to have an XID per target or similar.
3380  *
3381  * To avoid an unlikely collision between match bits after a client reboot
3382  * (which would deliver old data into the wrong RDMA buffer) initialize
3383  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3384  * If the time is clearly incorrect, we instead use a 62-bit random number.
3385  * In the worst case the random number will overflow 1M RPCs per second in
3386  * 9133 years, or permutations thereof.
3387  */
3388 #define YEAR_2004 (1ULL << 30)
3389 void ptlrpc_init_xid(void)
3390 {
3391         time64_t now = ktime_get_real_seconds();
3392         u64 xid;
3393
3394         if (now < YEAR_2004) {
3395                 get_random_bytes(&xid, sizeof(xid));
3396                 xid >>= 2;
3397                 xid |= (1ULL << 61);
3398         } else {
3399                 xid = (u64)now << 20;
3400         }
3401
3402         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3403         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3404                      0);
3405         xid &= PTLRPC_BULK_OPS_MASK;
3406         atomic64_set(&ptlrpc_last_xid, xid);
3407 }
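
/*
 * Worked example (sketch, hypothetical boot time): shifting the time left by
 * 20 bits reserves 2^20 = 1048576 (~1M) XIDs per second of wall-clock time,
 * so two boots one second apart start at least ~1M XIDs apart:
 *
 *	time64_t now = 1700000000;	// some boot time after YEAR_2004
 *	u64 xid = (u64)now << 20;	// == 1700000000 * 1048576
 */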
3408
3409 /**
3410  * Increases the xid and returns the resulting new value to the caller.
3411  *
3412  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
3413  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
3414  * itself uses the last bulk xid needed, so the server can determine the
3415  * number of bulk transfers from the RPC XID and a bitmask.  The starting
3416  * xid must align to a power-of-two value.
3417  *
3418  * This is assumed to be true due to the initial ptlrpc_last_xid
3419  * value also being initialized to a power-of-two value. LU-1431
3420  */
3421 __u64 ptlrpc_next_xid(void)
3422 {
3423         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3424 }
3425
3426 /**
3427  * If the request has a newly allocated XID (new request or EINPROGRESS resend),
3428  * use this XID as the bulk matchbits; otherwise allocate new matchbits for the
3429  * request to ensure the previous bulk fails, avoiding problems with lost replies
3430  * and therefore several transfers landing into the same buffer from different
3431  * sending attempts.
3432  */
3433 void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
3434 {
3435         struct ptlrpc_bulk_desc *bd = req->rq_bulk;
3436
3437         LASSERT(bd != NULL);
3438
3439         /*
3440          * Generate new matchbits for all resend requests, including
3441          * resend replay.
3442          */
3443         if (req->rq_resend) {
3444                 __u64 old_mbits = req->rq_mbits;
3445
3446                 /*
3447                  * A first-time resend on -EINPROGRESS will generate a new xid,
3448                  * so we could actually use rq_xid as rq_mbits in that case;
3449                  * however, it's a bit hard to distinguish such a resend from a
3450                  * 'resend of the -EINPROGRESS resend'. To keep it simple, we
3451                  * opt to generate new mbits for all resend cases.
3452                  */
3453                 if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data,
3454                                  BULK_MBITS)) {
3455                         req->rq_mbits = ptlrpc_next_xid();
3456                 } else {
3457                         /*
3458                          * Old version transfers rq_xid to peer as
3459                          * matchbits.
3460                          */
3461                         spin_lock(&req->rq_import->imp_lock);
3462                         list_del_init(&req->rq_unreplied_list);
3463                         ptlrpc_assign_next_xid_nolock(req);
3464                         spin_unlock(&req->rq_import->imp_lock);
3465                         req->rq_mbits = req->rq_xid;
3466                 }
3467                 CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
3468                        old_mbits, req->rq_mbits);
3469         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3470                 /* Request being sent first time, use xid as matchbits. */
3471                 if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)
3472                     || req->rq_mbits == 0) {
3473                         req->rq_mbits = req->rq_xid;
3474                 } else {
3475                         int total_md = (bd->bd_iov_count + LNET_MAX_IOV - 1) /
3476                                         LNET_MAX_IOV;
3477                         req->rq_mbits -= total_md - 1;
3478                 }
3479         } else {
3480                 /*
3481                  * Replay request, xid and matchbits have already been
3482                  * correctly assigned.
3483                  */
3484                 return;
3485         }
3486
3487         /*
3488          * For multi-bulk RPCs, rq_mbits is the last mbits needed for the bulks,
3489          * so that the server can infer the number of bulks that were prepared;
3490          * see LU-1431.
3491          */
3492         req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
3493                           LNET_MAX_IOV) - 1;
3494
3495         /*
3496          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3497          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3498          *
3499          * It's ok to directly set the rq_xid here, since this xid bump
3500          * won't affect the request position in unreplied list.
3501          */
3502         if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS))
3503                 req->rq_xid = req->rq_mbits;
3504 }
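
/*
 * Worked example (sketch; the values below are hypothetical and LNET_MAX_IOV
 * is assumed to be 256): a first-time send whose bulk descriptor spans three
 * LNET MDs consumes three consecutive matchbits and reports the last one:
 *
 *	// bd_iov_count == 600, LNET_MAX_IOV == 256
 *	total_md = (600 + 256 - 1) / 256;	// == 3 bulk transfers
 *	rq_mbits = rq_xid;			// first bulk uses the xid
 *	rq_mbits += total_md - 1;		// RPC reports xid + 2
 *
 * The server recovers the number of bulks from the offset of rq_mbits within
 * its PTLRPC_BULK_OPS_COUNT-aligned window (see LU-1431).
 */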
3505
3506 /**
3507  * Get a glimpse at what the next xid value might be.
3508  * Returns the possible next xid.
3509  */
3510 __u64 ptlrpc_sample_next_xid(void)
3511 {
3512         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3513 }
3514 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3515
3516 /**
3517  * Functions for operating ptlrpc workers.
3518  *
3519  * A ptlrpc work item is a function that runs inside ptlrpc context.
3520  * The callback shouldn't sleep, otherwise it will block the ptlrpcd thread.
3521  *
3522  * 1. After a work item is created, it can be used many times, that is:
3523  *         handler = ptlrpcd_alloc_work();
3524  *         ptlrpcd_queue_work();
3525  *
3526  *    queue it again when necessary:
3527  *         ptlrpcd_queue_work();
3528  *         ptlrpcd_destroy_work();
3529  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3530  *    but the work will only be queued once at any given time. Also, as its name
3531  *    implies, there may be a delay before it is actually run by a ptlrpcd thread.
3532  */
3533 struct ptlrpc_work_async_args {
3534         int (*cb)(const struct lu_env *, void *);
3535         void *cbdata;
3536 };
3537
3538 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3539 {
3540         /* re-initialize the req */
3541         req->rq_timeout         = obd_timeout;
3542         req->rq_sent            = ktime_get_real_seconds();
3543         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3544         req->rq_phase           = RQ_PHASE_INTERPRET;
3545         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3546         req->rq_xid             = ptlrpc_next_xid();
3547         req->rq_import_generation = req->rq_import->imp_generation;
3548
3549         ptlrpcd_add_req(req);
3550 }
3551
3552 static int work_interpreter(const struct lu_env *env,
3553                             struct ptlrpc_request *req, void *args, int rc)
3554 {
3555         struct ptlrpc_work_async_args *arg = args;
3556
3557         LASSERT(ptlrpcd_check_work(req));
3558         LASSERT(arg->cb != NULL);
3559
3560         rc = arg->cb(env, arg->cbdata);
3561
3562         list_del_init(&req->rq_set_chain);
3563         req->rq_set = NULL;
3564
3565         if (atomic_dec_return(&req->rq_refcount) > 1) {
3566                 atomic_set(&req->rq_refcount, 2);
3567                 ptlrpcd_add_work_req(req);
3568         }
3569         return rc;
3570 }
3571
3572 static int worker_format;
3573
3574 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3575 {
3576         return req->rq_pill.rc_fmt == (void *)&worker_format;
3577 }
3578
3579 /**
3580  * Create a work for ptlrpc.
3581  */
3582 void *ptlrpcd_alloc_work(struct obd_import *imp,
3583                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3584 {
3585         struct ptlrpc_request *req = NULL;
3586         struct ptlrpc_work_async_args *args;
3587
3588         ENTRY;
3589         might_sleep();
3590
3591         if (!cb)
3592                 RETURN(ERR_PTR(-EINVAL));
3593
3594         /* copy some code from deprecated fakereq. */
3595         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3596         if (!req) {
3597                 CERROR("ptlrpc: run out of memory!\n");
3598                 RETURN(ERR_PTR(-ENOMEM));
3599         }
3600
3601         ptlrpc_cli_req_init(req);
3602
3603         req->rq_send_state = LUSTRE_IMP_FULL;
3604         req->rq_type = PTL_RPC_MSG_REQUEST;
3605         req->rq_import = class_import_get(imp);
3606         req->rq_interpret_reply = work_interpreter;
3607         /* don't want reply */
3608         req->rq_no_delay = req->rq_no_resend = 1;
3609         req->rq_pill.rc_fmt = (void *)&worker_format;
3610
3611         args = ptlrpc_req_async_args(args, req);
3612         args->cb     = cb;
3613         args->cbdata = cbdata;
3614
3615         RETURN(req);
3616 }
3617 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3618
3619 void ptlrpcd_destroy_work(void *handler)
3620 {
3621         struct ptlrpc_request *req = handler;
3622
3623         if (req)
3624                 ptlrpc_req_finished(req);
3625 }
3626 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3627
3628 int ptlrpcd_queue_work(void *handler)
3629 {
3630         struct ptlrpc_request *req = handler;
3631
3632         /*
3633          * Check if the req is already being queued.
3634          *
3635          * Here comes a trick: ptlrpc lacks a reliable way of checking whether
3636          * a req is already being processed, so we have to use the req's refcount
3637          * for this purpose. This is okay because the caller should treat this
3638          * req as opaque data. - Jinshan
3639          */
3640         LASSERT(atomic_read(&req->rq_refcount) > 0);
3641         if (atomic_inc_return(&req->rq_refcount) == 2)
3642                 ptlrpcd_add_work_req(req);
3643         return 0;
3644 }
3645 EXPORT_SYMBOL(ptlrpcd_queue_work);
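
/*
 * Example (sketch of a hypothetical user of the ptlrpc work API; the callback
 * and variables below are illustrative only):
 *
 *	static int example_work_cb(const struct lu_env *env, void *data)
 *	{
 *		// must not sleep: runs in a ptlrpcd thread
 *		return 0;
 *	}
 *
 *	handler = ptlrpcd_alloc_work(imp, example_work_cb, data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *	ptlrpcd_queue_work(handler);	// safe to call repeatedly; queued once
 *	...
 *	ptlrpcd_destroy_work(handler);	// drops the final reference
 */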