LU-14594 ptlrpc: do not match reply with resent RPC
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 /** Implementation of client-side PortalRPC interfaces */
33
34 #define DEBUG_SUBSYSTEM S_RPC
35
36 #include <linux/delay.h>
37 #include <linux/random.h>
38
39 #include <lnet/lib-lnet.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(desc->bd_vec[i].bv_page);
69 }
70
71 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
72                                        void *frag, int len)
73 {
74         unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
75
76         ENTRY;
77         while (len > 0) {
78                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
79                                      len);
80                 unsigned long vaddr = (unsigned long)frag;
81
82                 ptlrpc_prep_bulk_page_nopin(desc,
83                                             lnet_kvaddr_to_page(vaddr),
84                                             offset, page_len);
85                 offset = 0;
86                 len -= page_len;
87                 frag += page_len;
88         }
89
90         RETURN(desc->bd_nob);
91 }
92
93 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
94         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
95         .release_frags  = ptlrpc_release_bulk_page_pin,
96 };
97 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
98
99 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
100         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
101         .release_frags  = ptlrpc_release_bulk_noop,
102         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
103 };
104 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
105
106 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
107 static int ptlrpcd_check_work(struct ptlrpc_request *req);
108 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
109
110 /**
111  * Initialize passed in client structure \a cl.
112  */
113 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
114                         struct ptlrpc_client *cl)
115 {
116         cl->cli_request_portal = req_portal;
117         cl->cli_reply_portal   = rep_portal;
118         cl->cli_name           = name;
119 }
120 EXPORT_SYMBOL(ptlrpc_init_client);
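
/*
 * Editor's example (not part of the original file): a minimal sketch of how
 * an OBD client fills in its ptlrpc_client at setup time, loosely modelled
 * on client_obd_setup().  The portal pair and the "sample-osc" name are
 * illustrative only.
 */
static void example_setup_client(struct obd_device *obd)
{
        /* all RPCs sent via this client use these request/reply portals */
        ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
                           "sample-osc", &obd->obd_ldlm_client);
}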
121
122 /**
123  * Return PortalRPC connection for remote uuid \a uuid
124  */
125 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
126                                                     lnet_nid_t nid4refnet)
127 {
128         struct ptlrpc_connection *c;
129         lnet_nid_t self;
130         struct lnet_process_id peer;
131         int err;
132
133         /*
134          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
135          * before accessing its values.
136          */
137         /* coverity[uninit_use_in_call] */
138         peer.nid = nid4refnet;
139         err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
140         if (err != 0) {
141                 CNETERR("cannot find peer %s!\n", uuid->uuid);
142                 return NULL;
143         }
144
145         c = ptlrpc_connection_get(peer, self, uuid);
146         if (c) {
147                 memcpy(c->c_remote_uuid.uuid,
148                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
149         }
150
151         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
152
153         return c;
154 }
155
156 /**
157  * Allocate and initialize new bulk descriptor on the sender.
158  * Returns pointer to the descriptor or NULL on error.
159  */
160 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
161                                          unsigned int max_brw,
162                                          enum ptlrpc_bulk_op_type type,
163                                          unsigned int portal,
164                                          const struct ptlrpc_bulk_frag_ops *ops)
165 {
166         struct ptlrpc_bulk_desc *desc;
167         int i;
168
169         LASSERT(ops->add_kiov_frag != NULL);
170
171         if (max_brw > PTLRPC_BULK_OPS_COUNT)
172                 RETURN(NULL);
173
174         if (nfrags > LNET_MAX_IOV * max_brw)
175                 RETURN(NULL);
176
177         OBD_ALLOC_PTR(desc);
178         if (!desc)
179                 return NULL;
180
181         OBD_ALLOC_LARGE(desc->bd_vec,
182                         nfrags * sizeof(*desc->bd_vec));
183         if (!desc->bd_vec)
184                 goto out;
185
186         spin_lock_init(&desc->bd_lock);
187         init_waitqueue_head(&desc->bd_waitq);
188         desc->bd_max_iov = nfrags;
189         desc->bd_iov_count = 0;
190         desc->bd_portal = portal;
191         desc->bd_type = type;
192         desc->bd_md_count = 0;
193         desc->bd_nob_last = LNET_MTU;
194         desc->bd_frag_ops = ops;
195         LASSERT(max_brw > 0);
196         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
197         /*
198          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
199          * node. Negotiated ocd_brw_size will always be <= this number.
200          */
201         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
202                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
203
204         return desc;
205 out:
206         OBD_FREE_PTR(desc);
207         return NULL;
208 }
209
210 /**
211  * Prepare bulk descriptor for specified outgoing request \a req that
212  * can fit \a nfrags pages. \a type is the bulk type. \a portal is where
213  * the bulk is to be sent. Used on the client side.
214  * Returns pointer to newly allocated initialized bulk descriptor or NULL on
215  * error.
216  */
217 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
218                                               unsigned int nfrags,
219                                               unsigned int max_brw,
220                                               unsigned int type,
221                                               unsigned int portal,
222                                               const struct ptlrpc_bulk_frag_ops
223                                                 *ops)
224 {
225         struct obd_import *imp = req->rq_import;
226         struct ptlrpc_bulk_desc *desc;
227
228         ENTRY;
229         LASSERT(ptlrpc_is_bulk_op_passive(type));
230
231         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
232         if (!desc)
233                 RETURN(NULL);
234
235         desc->bd_import = class_import_get(imp);
236         desc->bd_req = req;
237
238         desc->bd_cbid.cbid_fn  = client_bulk_callback;
239         desc->bd_cbid.cbid_arg = desc;
240
241         /* This makes req own desc; desc is freed when req itself is freed */
242         req->rq_bulk = desc;
243
244         return desc;
245 }
246 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
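
/*
 * Editor's example (not part of the original file): a hedged sketch of the
 * usual client-side sequence, loosely modelled on the OSC read path.  The
 * page array and count are hypothetical; PTLRPC_BULK_PUT_SINK is the
 * passive type for a bulk read (the server PUTs into the client's pages).
 */
static int example_prep_read_bulk(struct ptlrpc_request *req,
                                  struct page **pages, int npages)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        desc = ptlrpc_prep_bulk_imp(req, npages, 1, PTLRPC_BULK_PUT_SINK,
                                    OST_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_pin_ops);
        if (desc == NULL)
                return -ENOMEM;

        /* each full page becomes one kiov fragment of the transfer */
        for (i = 0; i < npages; i++)
                desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
                                                 PAGE_SIZE);

        /* desc is now owned by req and freed together with it */
        return 0;
}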
247
248 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
249                              struct page *page, int pageoffset, int len,
250                              int pin)
251 {
252         struct bio_vec *kiov;
253
254         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
255         LASSERT(page != NULL);
256         LASSERT(pageoffset >= 0);
257         LASSERT(len > 0);
258         LASSERT(pageoffset + len <= PAGE_SIZE);
259
260         kiov = &desc->bd_vec[desc->bd_iov_count];
261
262         if (((desc->bd_iov_count % LNET_MAX_IOV) == 0) ||
263              ((desc->bd_nob_last + len) > LNET_MTU)) {
264                 desc->bd_mds_off[desc->bd_md_count] = desc->bd_iov_count;
265                 desc->bd_md_count++;
266                 desc->bd_nob_last = 0;
267                 LASSERT(desc->bd_md_count <= PTLRPC_BULK_OPS_COUNT);
268         }
269
270         desc->bd_nob_last += len;
271         desc->bd_nob += len;
272
273         if (pin)
274                 get_page(page);
275
276         kiov->bv_page = page;
277         kiov->bv_offset = pageoffset;
278         kiov->bv_len = len;
279
280         desc->bd_iov_count++;
281 }
282 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
283
284 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
285 {
286         ENTRY;
287
288         LASSERT(desc != NULL);
289         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
290         LASSERT(desc->bd_refs == 0);         /* network hands off */
291         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
292         LASSERT(desc->bd_frag_ops != NULL);
293
294         sptlrpc_enc_pool_put_pages(desc);
295
296         if (desc->bd_export)
297                 class_export_put(desc->bd_export);
298         else
299                 class_import_put(desc->bd_import);
300
301         if (desc->bd_frag_ops->release_frags != NULL)
302                 desc->bd_frag_ops->release_frags(desc);
303
304         OBD_FREE_LARGE(desc->bd_vec,
305                        desc->bd_max_iov * sizeof(*desc->bd_vec));
306         OBD_FREE_PTR(desc);
307         EXIT;
308 }
309 EXPORT_SYMBOL(ptlrpc_free_bulk);
310
311 /**
312  * Set server timelimit for this req, i.e. how long are we willing to wait
313  * for reply before timing out this request.
314  */
315 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
316 {
317         LASSERT(req->rq_import);
318
319         if (AT_OFF) {
320                 /* non-AT settings */
321                 /**
322                  * \a imp_server_timeout means this is a reverse import and
323                  * we send (currently only) ASTs to the client, so we cannot
324                  * afford to wait too long for the reply; otherwise the other
325                  * client (because of which we are sending this request) would
326                  * time out waiting for us
327                  */
328                 req->rq_timeout = req->rq_import->imp_server_timeout ?
329                                   obd_timeout / 2 : obd_timeout;
330         } else {
331                 struct imp_at *at = &req->rq_import->imp_at;
332                 timeout_t serv_est;
333                 int idx;
334
335                 idx = import_at_get_index(req->rq_import,
336                                           req->rq_request_portal);
337                 serv_est = at_get(&at->iat_service_estimate[idx]);
338                 /*
339                  * Currently a 32 bit value is sent over the
340                  * wire for rq_timeout so please don't change this
341                  * to time64_t. The work for LU-1158 will in time
342                  * replace rq_timeout with a 64 bit nanosecond value
343                  */
344                 req->rq_timeout = at_est2timeout(serv_est);
345         }
346         /*
347          * We could get even fancier here, using history to predict increased
348          * loading...
349          *
350          * Let the server know what this RPC timeout is by putting it in the
351          * reqmsg
352          */
353         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
354 }
355 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
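
/*
 * Editor's note (not part of the original file): a worked example of the
 * adaptive branch above.  With a 40s service estimate for this portal,
 * at_est2timeout() gives roughly 40 x 125% + 5 = 55s (see the EINPROGRESS
 * resend comment later in this file), so rq_timeout becomes 55 and that
 * value is advertised to the server via lustre_msg_set_timeout().
 */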
356
357 /* Adjust max service estimate based on server value */
358 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
359                                   timeout_t serv_est)
360 {
361         int idx;
362         timeout_t oldse;
363         struct imp_at *at;
364
365         LASSERT(req->rq_import);
366         at = &req->rq_import->imp_at;
367
368         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
369         /*
370          * max service estimates are tracked on the server side,
371          * so just keep minimal history here
372          */
373         oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
374         if (oldse != 0)
375                 CDEBUG(D_ADAPTTO,
376                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
377                        req->rq_import->imp_obd->obd_name,
378                        req->rq_request_portal,
379                        oldse, at_get(&at->iat_service_estimate[idx]));
380 }
381
382 /* Expected network latency per remote node (secs) */
383 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
384 {
385         return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
386 }
387
388 /* Adjust expected network latency */
389 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
390                                timeout_t service_timeout)
391 {
392         time64_t now = ktime_get_real_seconds();
393         struct imp_at *at;
394         timeout_t oldnl;
395         timeout_t nl;
396
397         LASSERT(req->rq_import);
398
399         if (service_timeout > now - req->rq_sent + 3) {
400                 /*
401                  * b=16408. However, this can also happen if an early reply
402                  * is lost and the client RPC expires and is resent: the early
403                  * reply or the reply of the original RPC can still fit in the
404                  * reply buffer of the resent RPC. The client then measures
405                  * time from the resend, but the server sent back the service
406                  * time of the original RPC.
407                  */
408                 CDEBUG_LIMIT((lustre_msg_get_flags(req->rq_reqmsg) &
409                               MSG_RESENT) ?  D_ADAPTTO : D_WARNING,
410                              "Reported service time %u > total measured time %lld\n",
411                              service_timeout, now - req->rq_sent);
412                 return;
413         }
414
415         /* Network latency is total time less server processing time,
416          * plus 1s to allow for rounding of the reported service time
417          */
418         nl = max_t(timeout_t, now - req->rq_sent - service_timeout, 0) + 1;
419         at = &req->rq_import->imp_at;
420
421         oldnl = at_measured(&at->iat_net_latency, nl);
422         if (oldnl != 0)
423                 CDEBUG(D_ADAPTTO,
424                        "The network latency for %s (nid %s) has changed from %d to %d\n",
425                        req->rq_import->imp_obd->obd_name,
426                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
427                        oldnl, at_get(&at->iat_net_latency));
428 }
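
/*
 * Editor's note (not part of the original file): a worked example of the
 * calculation above.  If the reply arrives 7s after rq_sent and the server
 * reports 4s of service time, nl = max(7 - 4, 0) + 1 = 4s, and
 * at_measured() folds that sample into iat_net_latency.
 */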
429
430 static int unpack_reply(struct ptlrpc_request *req)
431 {
432         int rc;
433
434         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
435                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
436                 if (rc) {
437                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
438                                   rc);
439                         return -EPROTO;
440                 }
441         }
442
443         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
444         if (rc) {
445                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
446                           rc);
447                 return -EPROTO;
448         }
449         return 0;
450 }
451
452 /**
453  * Handle an early reply message, called with the rq_lock held.
454  * If anything goes wrong just ignore it - same as if it never happened
455  */
456 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
457 __must_hold(&req->rq_lock)
458 {
459         struct ptlrpc_request *early_req;
460         timeout_t service_timeout;
461         time64_t olddl;
462         int rc;
463
464         ENTRY;
465         req->rq_early = 0;
466         spin_unlock(&req->rq_lock);
467
468         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
469         if (rc) {
470                 spin_lock(&req->rq_lock);
471                 RETURN(rc);
472         }
473
474         rc = unpack_reply(early_req);
475         if (rc != 0) {
476                 sptlrpc_cli_finish_early_reply(early_req);
477                 spin_lock(&req->rq_lock);
478                 RETURN(rc);
479         }
480
481         /*
482          * Use new timeout value just to adjust the local value for this
483          * request, don't include it into at_history. It is not yet clear why
484          * the service time increased and whether it should be counted or
485          * skipped, e.g. this could be a recovery case or some error on the
486          * server; the real reply will add all the new data if it is worth adding.
487          */
488         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
489         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
490
491         /* Network latency can be adjusted, it is pure network delays */
492         service_timeout = lustre_msg_get_service_timeout(early_req->rq_repmsg);
493         ptlrpc_at_adj_net_latency(req, service_timeout);
494
495         sptlrpc_cli_finish_early_reply(early_req);
496
497         spin_lock(&req->rq_lock);
498         olddl = req->rq_deadline;
499         /*
500          * The server assumes it now has rq_timeout from when the request
501          * arrived, so the client should give it at least that long.
502          * Since we don't know the arrival time we'll use the original
503          * sent time.
504          */
505         req->rq_deadline = req->rq_sent + req->rq_timeout +
506                            ptlrpc_at_get_net_latency(req);
507
508         /* The below message is checked in replay-single.sh test_65{a,b} */
509         /* The below message is checked in sanity-{gss,krb5} test_8 */
510         DEBUG_REQ(D_ADAPTTO, req,
511                   "Early reply #%d, new deadline in %llds (%llds)",
512                   req->rq_early_count,
513                   req->rq_deadline - ktime_get_real_seconds(),
514                   req->rq_deadline - olddl);
515
516         RETURN(rc);
517 }
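
/*
 * Editor's note (not part of the original file): numerically, if the
 * request was sent at t=100 with an old deadline of 130, and the early
 * reply raises rq_timeout to 50 with a 2s network latency estimate, the
 * new deadline becomes 100 + 50 + 2 = 152, a +22s extension over the old
 * value reported by the debug message above.
 */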
518
519 static struct kmem_cache *request_cache;
520
521 int ptlrpc_request_cache_init(void)
522 {
523         request_cache = kmem_cache_create("ptlrpc_cache",
524                                           sizeof(struct ptlrpc_request),
525                                           0, SLAB_HWCACHE_ALIGN, NULL);
526         return request_cache ? 0 : -ENOMEM;
527 }
528
529 void ptlrpc_request_cache_fini(void)
530 {
531         kmem_cache_destroy(request_cache);
532 }
533
534 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
535 {
536         struct ptlrpc_request *req;
537
538         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
539         return req;
540 }
541
542 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
543 {
544         OBD_SLAB_FREE_PTR(req, request_cache);
545 }
546
547 /**
548  * Wind down request pool \a pool.
549  * Frees all requests from the pool too
550  */
551 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
552 {
553         struct ptlrpc_request *req;
554
555         LASSERT(pool != NULL);
556
557         spin_lock(&pool->prp_lock);
558         while ((req = list_first_entry_or_null(&pool->prp_req_list,
559                                                struct ptlrpc_request,
560                                                rq_list))) {
561                 list_del(&req->rq_list);
562                 LASSERT(req->rq_reqbuf);
563                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
564                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
565                 ptlrpc_request_cache_free(req);
566         }
567         spin_unlock(&pool->prp_lock);
568         OBD_FREE(pool, sizeof(*pool));
569 }
570 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
571
572 /**
573  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
574  */
575 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
576 {
577         int i;
578         int size = 1;
579
580         while (size < pool->prp_rq_size)
581                 size <<= 1;
582
583         LASSERTF(list_empty(&pool->prp_req_list) ||
584                  size == pool->prp_rq_size,
585                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
586                  pool->prp_rq_size, size);
587
588         pool->prp_rq_size = size;
589         for (i = 0; i < num_rq; i++) {
590                 struct ptlrpc_request *req;
591                 struct lustre_msg *msg;
592
593                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
594                 if (!req)
595                         return i;
596                 OBD_ALLOC_LARGE(msg, size);
597                 if (!msg) {
598                         ptlrpc_request_cache_free(req);
599                         return i;
600                 }
601                 req->rq_reqbuf = msg;
602                 req->rq_reqbuf_len = size;
603                 req->rq_pool = pool;
604                 spin_lock(&pool->prp_lock);
605                 list_add_tail(&req->rq_list, &pool->prp_req_list);
606                 spin_unlock(&pool->prp_lock);
607         }
608         return num_rq;
609 }
610 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
611
612 /**
613  * Create and initialize new request pool with given attributes:
614  * \a num_rq - initial number of requests to create for the pool
615  * \a msgsize - maximum message size possible for requests in this pool
616  * \a populate_pool - function to be called when more requests need to be added
617  *                    to the pool
618  * Returns pointer to newly created pool or NULL on error.
619  */
620 struct ptlrpc_request_pool *
621 ptlrpc_init_rq_pool(int num_rq, int msgsize,
622                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
623 {
624         struct ptlrpc_request_pool *pool;
625
626         OBD_ALLOC_PTR(pool);
627         if (!pool)
628                 return NULL;
629
630         /*
631          * Request the next power of two for the allocation, because the
632          * kernel would internally do exactly this
633          */
634         spin_lock_init(&pool->prp_lock);
635         INIT_LIST_HEAD(&pool->prp_req_list);
636         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
637         pool->prp_populate = populate_pool;
638
639         populate_pool(pool, num_rq);
640
641         return pool;
642 }
643 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
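
/*
 * Editor's example (not part of the original file): a sketch of creating a
 * small emergency pool using the populate callback defined above.  The
 * counts and message size are illustrative; note that prp_rq_size ends up
 * as msgsize + SPTLRPC_MAX_PAYLOAD, rounded up to a power of two.
 */
static struct ptlrpc_request_pool *example_make_pool(void)
{
        /* 4 preallocated requests, each able to hold a 16KB message */
        return ptlrpc_init_rq_pool(4, 16384, ptlrpc_add_rqs_to_pool);
}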
644
645 /**
646  * Fetches one request from pool \a pool
647  */
648 static struct ptlrpc_request *
649 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
650 {
651         struct ptlrpc_request *request;
652         struct lustre_msg *reqbuf;
653
654         if (!pool)
655                 return NULL;
656
657         spin_lock(&pool->prp_lock);
658
659         /*
660          * See if we have anything in the pool, and bail out if not.
661          * In the writeout path, where this matters, this is safe to do
662          * because nothing is lost in this case, and when some in-flight
663          * requests complete, this code will be called again.
664          */
665         if (unlikely(list_empty(&pool->prp_req_list))) {
666                 spin_unlock(&pool->prp_lock);
667                 return NULL;
668         }
669
670         request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
671                              rq_list);
672         list_del_init(&request->rq_list);
673         spin_unlock(&pool->prp_lock);
674
675         LASSERT(request->rq_reqbuf);
676         LASSERT(request->rq_pool);
677
678         reqbuf = request->rq_reqbuf;
679         memset(request, 0, sizeof(*request));
680         request->rq_reqbuf = reqbuf;
681         request->rq_reqbuf_len = pool->prp_rq_size;
682         request->rq_pool = pool;
683
684         return request;
685 }
686
687 /**
688  * Returns the freed \a request to its pool.
689  */
690 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
691 {
692         struct ptlrpc_request_pool *pool = request->rq_pool;
693
694         spin_lock(&pool->prp_lock);
695         LASSERT(list_empty(&request->rq_list));
696         LASSERT(!request->rq_receiving_reply);
697         list_add_tail(&request->rq_list, &pool->prp_req_list);
698         spin_unlock(&pool->prp_lock);
699 }
700
701 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
702 {
703         struct obd_import *imp = req->rq_import;
704         struct ptlrpc_request *iter;
705
706         assert_spin_locked(&imp->imp_lock);
707         LASSERT(list_empty(&req->rq_unreplied_list));
708
709         /* unreplied list is sorted by xid in ascending order */
710         list_for_each_entry_reverse(iter, &imp->imp_unreplied_list,
711                                     rq_unreplied_list) {
712                 LASSERT(req->rq_xid != iter->rq_xid);
713                 if (req->rq_xid < iter->rq_xid)
714                         continue;
715                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
716                 return;
717         }
718         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
719 }
720
721 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
722 {
723         req->rq_xid = ptlrpc_next_xid();
724         ptlrpc_add_unreplied(req);
725 }
726
727 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
728 {
729         spin_lock(&req->rq_import->imp_lock);
730         ptlrpc_assign_next_xid_nolock(req);
731         spin_unlock(&req->rq_import->imp_lock);
732 }
733
734 static atomic64_t ptlrpc_last_xid;
735
736 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
737 {
738         spin_lock(&req->rq_import->imp_lock);
739         list_del_init(&req->rq_unreplied_list);
740         ptlrpc_assign_next_xid_nolock(req);
741         spin_unlock(&req->rq_import->imp_lock);
742         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
743 }
744
745 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
746 {
747         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
748         __u32 opc;
749         __u16 tag;
750
751         opc = lustre_msg_get_opc(req->rq_reqmsg);
752         tag = obd_get_mod_rpc_slot(cli, opc);
753         lustre_msg_set_tag(req->rq_reqmsg, tag);
754         ptlrpc_reassign_next_xid(req);
755 }
756 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
757
758 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
759 {
760         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
761
762         if (tag != 0) {
763                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
764                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
765
766                 obd_put_mod_rpc_slot(cli, opc, tag);
767         }
768 }
769 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
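
/*
 * Editor's example (not part of the original file): the usual bracket
 * around a modifying RPC, as used by e.g. the MDC layer.  The request must
 * already be packed (the slot code reads the opcode from rq_reqmsg);
 * getting the slot tags the request and re-assigns its XID so that XID
 * order matches slot order.
 */
static void example_mod_rpc(struct ptlrpc_request *req)
{
        ptlrpc_get_mod_rpc_slot(req);
        /* ... send the request and wait for its reply here ... */
        ptlrpc_put_mod_rpc_slot(req);
}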
770
771 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
772                              __u32 version, int opcode, char **bufs,
773                              struct ptlrpc_cli_ctx *ctx)
774 {
775         int count;
776         struct obd_import *imp;
777         __u32 *lengths;
778         int rc;
779
780         ENTRY;
781
782         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
783         imp = request->rq_import;
784         lengths = request->rq_pill.rc_area[RCL_CLIENT];
785
786         if (ctx) {
787                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
788         } else {
789                 rc = sptlrpc_req_get_ctx(request);
790                 if (rc)
791                         GOTO(out_free, rc);
792         }
793         sptlrpc_req_set_flavor(request, opcode);
794
795         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
796                                  lengths, bufs);
797         if (rc)
798                 GOTO(out_ctx, rc);
799
800         lustre_msg_add_version(request->rq_reqmsg, version);
801         request->rq_send_state = LUSTRE_IMP_FULL;
802         request->rq_type = PTL_RPC_MSG_REQUEST;
803
804         request->rq_req_cbid.cbid_fn  = request_out_callback;
805         request->rq_req_cbid.cbid_arg = request;
806
807         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
808         request->rq_reply_cbid.cbid_arg = request;
809
810         request->rq_reply_deadline = 0;
811         request->rq_bulk_deadline = 0;
812         request->rq_req_deadline = 0;
813         request->rq_phase = RQ_PHASE_NEW;
814         request->rq_next_phase = RQ_PHASE_UNDEFINED;
815
816         request->rq_request_portal = imp->imp_client->cli_request_portal;
817         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
818
819         ptlrpc_at_set_req_timeout(request);
820
821         lustre_msg_set_opc(request->rq_reqmsg, opcode);
822
823         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
824         if (cfs_fail_val == opcode) {
825                 time64_t *fail_t = NULL, *fail2_t = NULL;
826
827                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
828                         fail_t = &request->rq_bulk_deadline;
829                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
830                         fail_t = &request->rq_reply_deadline;
831                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
832                         fail_t = &request->rq_req_deadline;
833                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
834                         fail_t = &request->rq_reply_deadline;
835                         fail2_t = &request->rq_bulk_deadline;
836                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
837                         time64_t now = ktime_get_real_seconds();
838                         u64 xid = ((u64)now >> 4) << 24;
839
840                         atomic64_set(&ptlrpc_last_xid, xid);
841                 }
842
843                 if (fail_t) {
844                         *fail_t = ktime_get_real_seconds() +
845                                   PTLRPC_REQ_LONG_UNLINK;
846
847                         if (fail2_t)
848                                 *fail2_t = ktime_get_real_seconds() +
849                                            PTLRPC_REQ_LONG_UNLINK;
850
851                         /*
852                          * The RPC is infected, let the test change the
853                          * fail_loc
854                          */
855                         msleep(4 * MSEC_PER_SEC);
856                 }
857         }
858         ptlrpc_assign_next_xid(request);
859
860         RETURN(0);
861
862 out_ctx:
863         LASSERT(!request->rq_pool);
864         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
865 out_free:
866         atomic_dec(&imp->imp_reqs);
867         class_import_put(imp);
868
869         return rc;
870 }
871 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
872
873 /**
874  * Pack request buffers for network transfer, performing any necessary
875  * encryption steps.
876  */
877 int ptlrpc_request_pack(struct ptlrpc_request *request,
878                         __u32 version, int opcode)
879 {
880         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
881 }
882 EXPORT_SYMBOL(ptlrpc_request_pack);
883
884 /**
885  * Helper function to allocate a new request on import \a imp,
886  * possibly reusing an existing request from pool \a pool if provided.
887  * Returns allocated request structure with import field filled or
888  * NULL on error.
889  */
890 static inline
891 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
892                                               struct ptlrpc_request_pool *pool)
893 {
894         struct ptlrpc_request *request = NULL;
895
896         request = ptlrpc_request_cache_alloc(GFP_NOFS);
897
898         if (!request && pool)
899                 request = ptlrpc_prep_req_from_pool(pool);
900
901         if (request) {
902                 ptlrpc_cli_req_init(request);
903
904                 LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
905                 LASSERT(imp != LP_POISON);
906                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
907                          imp->imp_client);
908                 LASSERT(imp->imp_client != LP_POISON);
909
910                 request->rq_import = class_import_get(imp);
911                 atomic_inc(&imp->imp_reqs);
912         } else {
913                 CERROR("request allocation out of memory\n");
914         }
915
916         return request;
917 }
918
919 static int ptlrpc_reconnect_if_idle(struct obd_import *imp)
920 {
921         int rc;
922
923         /*
924          * initiate connection if needed when the import has been
925          * referenced by the new request to avoid races with disconnect.
926          * serialize this check against conditional state=IDLE
927          * in ptlrpc_disconnect_idle_interpret()
928          */
929         spin_lock(&imp->imp_lock);
930         if (imp->imp_state == LUSTRE_IMP_IDLE) {
931                 imp->imp_generation++;
932                 imp->imp_initiated_at = imp->imp_generation;
933                 imp->imp_state = LUSTRE_IMP_NEW;
934
935                 /* connect_import_locked releases imp_lock */
936                 rc = ptlrpc_connect_import_locked(imp);
937                 if (rc)
938                         return rc;
939                 ptlrpc_pinger_add_import(imp);
940         } else {
941                 spin_unlock(&imp->imp_lock);
942         }
943         return 0;
944 }
945
946 /**
947  * Helper function for creating a request.
948  * Calls __ptlrpc_request_alloc to allocate a new request structure and
949  * initializes its buffer structures according to capsule template \a format.
950  * Returns allocated request structure pointer or NULL on error.
951  */
952 static struct ptlrpc_request *
953 ptlrpc_request_alloc_internal(struct obd_import *imp,
954                               struct ptlrpc_request_pool *pool,
955                               const struct req_format *format)
956 {
957         struct ptlrpc_request *request;
958
959         request = __ptlrpc_request_alloc(imp, pool);
960         if (!request)
961                 return NULL;
962
963         /* don't make the expensive check for an idling connection
964          * if it's already connected */
965         if (unlikely(imp->imp_state != LUSTRE_IMP_FULL)) {
966                 if (ptlrpc_reconnect_if_idle(imp) < 0) {
967                         atomic_dec(&imp->imp_reqs);
968                         ptlrpc_request_free(request);
969                         return NULL;
970                 }
971         }
972
973         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
974         req_capsule_set(&request->rq_pill, format);
975         return request;
976 }
977
978 /**
979  * Allocate new request structure for import \a imp and initialize its
980  * buffer structure according to capsule template \a format.
981  */
982 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
983                                             const struct req_format *format)
984 {
985         return ptlrpc_request_alloc_internal(imp, NULL, format);
986 }
987 EXPORT_SYMBOL(ptlrpc_request_alloc);
988
989 /**
990  * Allocate new request structure for import \a imp from pool \a pool and
991  * initialize its buffer structure according to capsule template \a format.
992  */
993 struct ptlrpc_request *
994 ptlrpc_request_alloc_pool(struct obd_import *imp,
995                           struct ptlrpc_request_pool *pool,
996                           const struct req_format *format)
997 {
998         return ptlrpc_request_alloc_internal(imp, pool, format);
999 }
1000 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1001
1002 /**
1003  * For requests not from pool, free memory of the request structure.
1004  * For requests obtained from a pool earlier, return request back to pool.
1005  */
1006 void ptlrpc_request_free(struct ptlrpc_request *request)
1007 {
1008         if (request->rq_pool)
1009                 __ptlrpc_free_req_to_pool(request);
1010         else
1011                 ptlrpc_request_cache_free(request);
1012 }
1013 EXPORT_SYMBOL(ptlrpc_request_free);
1014
1015 /**
1016  * Allocate new request for operation \a opcode and immediately pack it for
1017  * network transfer.
1018  * Only used for simple requests like OBD_PING where the only important
1019  * part of the request is the operation itself.
1020  * Returns allocated request or NULL on error.
1021  */
1022 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1023                                                  const struct req_format *format,
1024                                                  __u32 version, int opcode)
1025 {
1026         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1027         int rc;
1028
1029         if (req) {
1030                 rc = ptlrpc_request_pack(req, version, opcode);
1031                 if (rc) {
1032                         ptlrpc_request_free(req);
1033                         req = NULL;
1034                 }
1035         }
1036         return req;
1037 }
1038 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
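
/*
 * Editor's example (not part of the original file): the classic user of
 * this helper is the ping path, roughly as in ptlrpc_prep_ping(): allocate
 * and pack an OBD_PING request in one call, then hand it to ptlrpcd.
 */
static int example_send_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (req == NULL)
                return -ENOMEM;

        ptlrpc_request_set_replen(req);
        ptlrpcd_add_req(req);
        return 0;
}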
1039
1040 /**
1041  * Allocate and initialize new request set structure on the current CPT.
1042  * Returns a pointer to the newly allocated set structure or NULL on error.
1043  */
1044 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1045 {
1046         struct ptlrpc_request_set *set;
1047         int cpt;
1048
1049         ENTRY;
1050         cpt = cfs_cpt_current(cfs_cpt_tab, 0);
1051         OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
1052         if (!set)
1053                 RETURN(NULL);
1054         atomic_set(&set->set_refcount, 1);
1055         INIT_LIST_HEAD(&set->set_requests);
1056         init_waitqueue_head(&set->set_waitq);
1057         atomic_set(&set->set_new_count, 0);
1058         atomic_set(&set->set_remaining, 0);
1059         spin_lock_init(&set->set_new_req_lock);
1060         INIT_LIST_HEAD(&set->set_new_requests);
1061         set->set_max_inflight = UINT_MAX;
1062         set->set_producer     = NULL;
1063         set->set_producer_arg = NULL;
1064         set->set_rc           = 0;
1065
1066         RETURN(set);
1067 }
1068 EXPORT_SYMBOL(ptlrpc_prep_set);
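
/*
 * Editor's example (not part of the original file): a hedged sketch of the
 * synchronous set pattern.  The caller holds one reference per request;
 * ptlrpc_set_add_req() takes each reference over, and ptlrpc_set_destroy()
 * drops them after completion.
 */
static int example_wait_two_rpcs(const struct lu_env *env,
                                 struct ptlrpc_request *req1,
                                 struct ptlrpc_request *req2)
{
        struct ptlrpc_request_set *set;
        int rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        ptlrpc_set_add_req(set, req1);
        ptlrpc_set_add_req(set, req2);
        rc = ptlrpc_set_wait(env, set);         /* sends and waits for both */
        ptlrpc_set_destroy(set);
        return rc;
}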
1069
1070 /**
1071  * Allocate and initialize new request set structure with flow control
1072  * extension. This extension allows controlling the number of requests in
1073  * flight for the whole set. A callback function to generate requests must be
1074  * provided and the request set will cap the number of requests sent over the
1075  * wire at @max_inflight.
1076  * Returns a pointer to the newly allocated set structure or NULL on error.
1077  */
1078 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1079                                              void *arg)
1080
1081 {
1082         struct ptlrpc_request_set *set;
1083
1084         set = ptlrpc_prep_set();
1085         if (!set)
1086                 RETURN(NULL);
1087
1088         set->set_max_inflight  = max;
1089         set->set_producer      = func;
1090         set->set_producer_arg  = arg;
1091
1092         RETURN(set);
1093 }
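
/*
 * Editor's example (not part of the original file): the shape of a producer
 * callback for the flow-controlled variant above.  example_dequeue() is a
 * hypothetical helper supplying the next prepared request; returning
 * -ENOENT is assumed here to tell the set machinery that no more RPCs will
 * be produced.
 */
static int example_producer(struct ptlrpc_request_set *set, void *arg)
{
        struct ptlrpc_request *req = example_dequeue(arg); /* hypothetical */

        if (req == NULL)
                return -ENOENT;         /* no more RPCs to produce */

        ptlrpc_set_add_req(set, req);
        return 0;
}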
1094
1095 /**
1096  * Wind down and free request set structure previously allocated with
1097  * ptlrpc_prep_set.
1098  * Ensures that all requests on the set have completed and removes
1099  * all requests from the request list in a set.
1100  * If any unsent requests happen to be on the list, pretends that they got
1101  * an error in flight and calls their completion handler.
1102  */
1103 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1104 {
1105         struct ptlrpc_request *req;
1106         int expected_phase;
1107         int n = 0;
1108
1109         ENTRY;
1110
1111         /* Requests on the set should either all be completed, or all be new */
1112         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1113                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1114         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
1115                 LASSERT(req->rq_phase == expected_phase);
1116                 n++;
1117         }
1118
1119         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1120                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1121                  atomic_read(&set->set_remaining), n);
1122
1123         while ((req = list_first_entry_or_null(&set->set_requests,
1124                                                struct ptlrpc_request,
1125                                                rq_set_chain))) {
1126                 list_del_init(&req->rq_set_chain);
1127
1128                 LASSERT(req->rq_phase == expected_phase);
1129
1130                 if (req->rq_phase == RQ_PHASE_NEW) {
1131                         ptlrpc_req_interpret(NULL, req, -EBADR);
1132                         atomic_dec(&set->set_remaining);
1133                 }
1134
1135                 spin_lock(&req->rq_lock);
1136                 req->rq_set = NULL;
1137                 req->rq_invalid_rqset = 0;
1138                 spin_unlock(&req->rq_lock);
1139
1140                 ptlrpc_req_finished(req);
1141         }
1142
1143         LASSERT(atomic_read(&set->set_remaining) == 0);
1144
1145         ptlrpc_reqset_put(set);
1146         EXIT;
1147 }
1148 EXPORT_SYMBOL(ptlrpc_set_destroy);
1149
1150 /**
1151  * Add a new request to the general purpose request set.
1152  * Assumes request reference from the caller.
1153  */
1154 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1155                         struct ptlrpc_request *req)
1156 {
1157         if (set == PTLRPCD_SET) {
1158                 ptlrpcd_add_req(req);
1159                 return;
1160         }
1161
1162         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1163         LASSERT(list_empty(&req->rq_set_chain));
1164
1165         if (req->rq_allow_intr)
1166                 set->set_allow_intr = 1;
1167
1168         /* The set takes over the caller's request reference */
1169         list_add_tail(&req->rq_set_chain, &set->set_requests);
1170         req->rq_set = set;
1171         atomic_inc(&set->set_remaining);
1172         req->rq_queued_time = ktime_get_seconds();
1173
1174         if (req->rq_reqmsg)
1175                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1176
1177         if (set->set_producer)
1178                 /*
1179                  * If the request set has a producer callback, the RPC must be
1180                  * sent straight away
1181                  */
1182                 ptlrpc_send_new_req(req);
1183 }
1184 EXPORT_SYMBOL(ptlrpc_set_add_req);
1185
1186 /**
1187  * Add a request to a request set with a dedicated server thread
1188  * and wake the thread for any necessary processing.
1189  * Currently only used for ptlrpcd.
1190  */
1191 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1192                             struct ptlrpc_request *req)
1193 {
1194         struct ptlrpc_request_set *set = pc->pc_set;
1195         int count, i;
1196
1197         LASSERT(req->rq_set == NULL);
1198         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1199
1200         spin_lock(&set->set_new_req_lock);
1201         /*
1202          * The set takes over the caller's request reference.
1203          */
1204         req->rq_set = set;
1205         req->rq_queued_time = ktime_get_seconds();
1206         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1207         count = atomic_inc_return(&set->set_new_count);
1208         spin_unlock(&set->set_new_req_lock);
1209
1210         /* Only need to call wakeup once for the first entry. */
1211         if (count == 1) {
1212                 wake_up(&set->set_waitq);
1213
1214                 /*
1215                  * XXX: It may be unnecessary to wake up all the partners. But to
1216                  *      guarantee the async RPC can be processed ASAP, we have
1217                  *      no better choice. It may be fixed in the future.
1218                  */
1219                 for (i = 0; i < pc->pc_npartners; i++)
1220                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1221         }
1222 }
1223
1224 /**
1225  * Based on the current state of the import, determine if the request
1226  * can be sent, is an error, or should be delayed.
1227  *
1228  * Returns true if this request should be delayed. If false, and
1229  * *status is set, then the request cannot be sent and *status is the
1230  * error code.  If false and *status is 0, then the request can be sent.
1231  *
1232  * The imp->imp_lock must be held.
1233  */
1234 static int ptlrpc_import_delay_req(struct obd_import *imp,
1235                                    struct ptlrpc_request *req, int *status)
1236 {
1237         int delay = 0;
1238
1239         ENTRY;
1240         LASSERT(status);
1241         *status = 0;
1242
1243         if (req->rq_ctx_init || req->rq_ctx_fini) {
1244                 /* always allow ctx init/fini rpc go through */
1245         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1246                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1247                 *status = -EIO;
1248         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1249                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1250
1251                 /*
1252                  * pings or MDS-equivalent STATFS may safely
1253                  * race with umount
1254                  */
1255                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1256                           D_HA : D_ERROR, req, "IMP_CLOSED");
1257                 *status = -EIO;
1258         } else if (ptlrpc_send_limit_expired(req)) {
1259                 /* probably doesn't need to be a D_ERROR after initial testing */
1260                 DEBUG_REQ(D_HA, req, "send limit expired");
1261                 *status = -ETIMEDOUT;
1262         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1263                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1264                 /* allow CONNECT even if import is invalid */
1265                 if (atomic_read(&imp->imp_inval_count) != 0) {
1266                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1267                         *status = -EIO;
1268                 }
1269         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1270                 if (!imp->imp_deactive)
1271                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1272                 *status = -ESHUTDOWN; /* b=12940 */
1273         } else if (req->rq_import_generation != imp->imp_generation) {
1274                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
1275                 *status = -EIO;
1276         } else if (req->rq_send_state != imp->imp_state) {
1277                 /* invalidate in progress - any requests should be dropped */
1278                 if (atomic_read(&imp->imp_inval_count) != 0) {
1279                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1280                         *status = -EIO;
1281                 } else if (req->rq_no_delay &&
1282                            imp->imp_generation != imp->imp_initiated_at) {
1283                         /* ignore nodelay for requests initiating connections */
1284                         *status = -EAGAIN;
1285                 } else if (req->rq_allow_replay &&
1286                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1287                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1288                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1289                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1290                         DEBUG_REQ(D_HA, req, "allow during recovery");
1291                 } else {
1292                         delay = 1;
1293                 }
1294         }
1295
1296         RETURN(delay);
1297 }
1298
1299 /**
1300  * Decide if the error message should be printed to the console or not.
1301  * Makes its decision based on request type, status, and failure frequency.
1302  *
1303  * \param[in] req  request that failed and may need a console message
1304  *
1305  * \retval false if no message should be printed
1306  * \retval true  if console message should be printed
1307  */
1308 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1309 {
1310         LASSERT(req->rq_reqmsg != NULL);
1311
1312         /* Suppress particular reconnect errors which are to be expected. */
1313         if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
1314                 /* Suppress timed out reconnect requests */
1315                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1316                     req->rq_timedout)
1317                         return false;
1318
1319                 /*
1320                  * Suppress most unavailable/again reconnect requests, but
1321                  * print occasionally so it is clear client is trying to
1322                  * connect to a server where no target is running.
1323                  */
1324                 if ((err == -ENODEV || err == -EAGAIN) &&
1325                     req->rq_import->imp_conn_cnt % 30 != 20)
1326                         return false;
1327         }
1328
1329         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1330                 /* -EAGAIN is normal when using POSIX flocks */
1331                 return false;
1332
1333         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1334             (req->rq_xid & 0xf) != 10)
1335                 /* Suppress most ping requests, they may fail occasionally */
1336                 return false;
1337
1338         return true;
1339 }
1340
1341 /**
1342  * Check request processing status.
1343  * Returns the status.
1344  */
1345 static int ptlrpc_check_status(struct ptlrpc_request *req)
1346 {
1347         int rc;
1348
1349         ENTRY;
1350         rc = lustre_msg_get_status(req->rq_repmsg);
1351         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1352                 struct obd_import *imp = req->rq_import;
1353                 lnet_nid_t nid = imp->imp_connection->c_peer.nid;
1354                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1355
1356                 if (ptlrpc_console_allow(req, opc, rc))
1357                         LCONSOLE_ERROR_MSG(0x11,
1358                                            "%s: operation %s to node %s failed: rc = %d\n",
1359                                            imp->imp_obd->obd_name,
1360                                            ll_opcode2str(opc),
1361                                            libcfs_nid2str(nid), rc);
1362                 RETURN(rc < 0 ? rc : -EINVAL);
1363         }
1364
1365         if (rc)
1366                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1367
1368         RETURN(rc);
1369 }
1370
1371 /**
1372  * Save pre-versions of objects into request for replay.
1373  * Versions are obtained from the server reply.
1374  * Used for VBR (version-based recovery).
1375  */
1376 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1377 {
1378         struct lustre_msg *repmsg = req->rq_repmsg;
1379         struct lustre_msg *reqmsg = req->rq_reqmsg;
1380         __u64 *versions = lustre_msg_get_versions(repmsg);
1381
1382         ENTRY;
1383         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1384                 return;
1385
1386         LASSERT(versions);
1387         lustre_msg_set_versions(reqmsg, versions);
1388         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1389                versions[0], versions[1]);
1390
1391         EXIT;
1392 }
1393
1394 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1395 {
1396         struct ptlrpc_request *req;
1397
1398         assert_spin_locked(&imp->imp_lock);
1399         if (list_empty(&imp->imp_unreplied_list))
1400                 return 0;
1401
1402         req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
1403                          rq_unreplied_list);
1404         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1405
1406         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1407                 imp->imp_known_replied_xid = req->rq_xid - 1;
1408
1409         return req->rq_xid - 1;
1410 }
1411
1412 /**
1413  * Callback function called when client receives RPC reply for \a req.
1414  * Returns 0 on success or error code.
1415  * The return value would be assigned to req->rq_status by the caller
1416  * as request processing status.
1417  * This function also decides if the request needs to be saved for later replay.
1418  */
1419 static int after_reply(struct ptlrpc_request *req)
1420 {
1421         struct obd_import *imp = req->rq_import;
1422         struct obd_device *obd = req->rq_import->imp_obd;
1423         ktime_t work_start;
1424         u64 committed;
1425         s64 timediff;
1426         int rc;
1427
1428         ENTRY;
1429         LASSERT(obd != NULL);
1430         /* repbuf must be unlinked */
1431         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1432
1433         if (req->rq_reply_truncated) {
1434                 if (ptlrpc_no_resend(req)) {
1435                         DEBUG_REQ(D_ERROR, req,
1436                                   "reply buffer overflow, expected=%d, actual size=%d",
1437                                   req->rq_nob_received, req->rq_repbuf_len);
1438                         RETURN(-EOVERFLOW);
1439                 }
1440
1441                 sptlrpc_cli_free_repbuf(req);
1442                 /*
1443                  * Pass the required reply buffer size (including
1444                  * space for the early reply).
1445                  * NB: no need to round up because alloc_repbuf
1446                  * will round it up
1447                  */
1448                 req->rq_replen = req->rq_nob_received;
1449                 req->rq_nob_received = 0;
1450                 spin_lock(&req->rq_lock);
1451                 req->rq_resend       = 1;
1452                 spin_unlock(&req->rq_lock);
1453                 RETURN(0);
1454         }
1455
1456         work_start = ktime_get_real();
1457         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1458
1459         /*
1460          * NB Until this point, the whole of the incoming message,
1461          * including buflens, status etc is in the sender's byte order.
1462          */
1463         rc = sptlrpc_cli_unwrap_reply(req);
1464         if (rc) {
1465                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1466                 RETURN(rc);
1467         }
1468
1469         /*
1470          * Security layer unwrap might ask to resend this request.
1471          */
1472         if (req->rq_resend)
1473                 RETURN(0);
1474
1475         rc = unpack_reply(req);
1476         if (rc)
1477                 RETURN(rc);
1478
1479         /* retry indefinitely on EINPROGRESS */
1480         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1481             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1482                 time64_t now = ktime_get_real_seconds();
1483
1484                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1485                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1486                 spin_lock(&req->rq_lock);
1487                 req->rq_resend = 1;
1488                 spin_unlock(&req->rq_lock);
1489                 req->rq_nr_resend++;
1490
1491                 /* Readjust the timeout for current conditions */
1492                 ptlrpc_at_set_req_timeout(req);
1493                 /*
1494                  * Delay the resend to give the server a chance to get ready.
1495                  * The delay is increased by 1s on every resend and is capped to
1496                  * the current request timeout (i.e. obd_timeout if AT is off,
1497                  * or AT service time x 125% + 5s, see at_est2timeout)
1498                  */
1499                 if (req->rq_nr_resend > req->rq_timeout)
1500                         req->rq_sent = now + req->rq_timeout;
1501                 else
1502                         req->rq_sent = now + req->rq_nr_resend;
1503
1504                 /* Resend for EINPROGRESS will use a new XID */
1505                 spin_lock(&imp->imp_lock);
1506                 list_del_init(&req->rq_unreplied_list);
1507                 spin_unlock(&imp->imp_lock);
1508
1509                 RETURN(0);
1510         }
1511
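        /*
         * Worked example of the backoff above (editor's sketch): with
         * rq_timeout == 10, resend #3 after -EINPROGRESS is delayed by 3s
         * (rq_sent = now + 3), while resend #15 is capped at the request
         * timeout (rq_sent = now + 10).
         */
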
1512         if (obd->obd_svc_stats) {
1513                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1514                                     timediff);
1515                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1516         }
1517
1518         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1519             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1520                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1521                           lustre_msg_get_type(req->rq_repmsg));
1522                 RETURN(-EPROTO);
1523         }
1524
1525         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1526                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
1527         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1528         ptlrpc_at_adj_net_latency(req,
1529                                   lustre_msg_get_service_timeout(req->rq_repmsg));
1530
1531         rc = ptlrpc_check_status(req);
1532
1533         if (rc) {
1534                 /*
1535                  * Either we've been evicted, or the server has failed for
1536                  * some reason. Try to reconnect, and if that fails, punt to
1537                  * the upcall.
1538                  */
1539                 if (ptlrpc_recoverable_error(rc)) {
1540                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1541                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1542                                 RETURN(rc);
1543                         }
1544                         ptlrpc_request_handle_notconn(req);
1545                         RETURN(rc);
1546                 }
1547         } else {
1548                 /*
1549                  * Check whether the server sent an SLV (server lock
1550                  * volume). Do it only for RPCs with rc == 0.
1551                  */
1552                 ldlm_cli_update_pool(req);
1553         }
1554
1555         /*
1556          * Store transno in reqmsg for replay.
1557          */
1558         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1559                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1560                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1561         }
1562
1563         if (imp->imp_replayable) {
1564                 spin_lock(&imp->imp_lock);
1565                 /*
1566                  * No point in adding already-committed requests to the replay
1567                  * list, we will just remove them immediately. b=9829
1568                  */
1569                 if (req->rq_transno != 0 &&
1570                     (req->rq_transno >
1571                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1572                      req->rq_replay)) {
1573                         /** version recovery */
1574                         ptlrpc_save_versions(req);
1575                         ptlrpc_retain_replayable_request(req, imp);
1576                 } else if (req->rq_commit_cb &&
1577                            list_empty(&req->rq_replay_list)) {
1578                         /*
1579                          * NB: don't call rq_commit_cb if it's already on
1580                          * rq_replay_list, ptlrpc_free_committed() will call
1581                          * it later, see LU-3618 for details
1582                          */
1583                         spin_unlock(&imp->imp_lock);
1584                         req->rq_commit_cb(req);
1585                         spin_lock(&imp->imp_lock);
1586                 }
1587
1588                 /*
1589                  * Replay-enabled imports return commit-status information.
1590                  */
1591                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1592                 if (likely(committed > imp->imp_peer_committed_transno))
1593                         imp->imp_peer_committed_transno = committed;
1594
1595                 ptlrpc_free_committed(imp);
1596
1597                 if (!list_empty(&imp->imp_replay_list)) {
1598                         struct ptlrpc_request *last;
1599
1600                         last = list_entry(imp->imp_replay_list.prev,
1601                                           struct ptlrpc_request,
1602                                           rq_replay_list);
1603                         /*
1604                          * Requests with rq_replay stay on the list even if no
1605                          * commit is expected.
1606                          */
1607                         if (last->rq_transno > imp->imp_peer_committed_transno)
1608                                 ptlrpc_pinger_commit_expected(imp);
1609                 }
1610
1611                 spin_unlock(&imp->imp_lock);
1612         }
1613
1614         RETURN(rc);
1615 }
1616
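/*
 * Illustrative note (editor's sketch, not part of the original source): the
 * replay logic in after_reply() keeps a request only while its transno may
 * still be needed for recovery, e.g.:
 *
 *	rq_transno = 100, last_committed =  90 -> retained for replay
 *	rq_transno = 100, last_committed = 120 -> already committed; its
 *						   rq_commit_cb runs instead
 */
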
1617 /**
1618  * Helper function to send request \a req over the network for the first time
1619  * Also adjusts request phase.
1620  * Returns 0 on success or error code.
1621  */
1622 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1623 {
1624         struct obd_import *imp = req->rq_import;
1625         __u64 min_xid = 0;
1626         int rc;
1627
1628         ENTRY;
1629         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1630
1631         /* do not try to go further if there is not enough memory in enc_pool */
1632         if (req->rq_sent && req->rq_bulk)
1633                 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1634                     pool_is_at_full_capacity())
1635                         RETURN(-ENOMEM);
1636
1637         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1638             (!req->rq_generation_set ||
1639              req->rq_import_generation == imp->imp_generation))
1640                 RETURN(0);
1641
1642         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1643
1644         spin_lock(&imp->imp_lock);
1645
1646         LASSERT(req->rq_xid != 0);
1647         LASSERT(!list_empty(&req->rq_unreplied_list));
1648
1649         if (!req->rq_generation_set)
1650                 req->rq_import_generation = imp->imp_generation;
1651
1652         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1653                 spin_lock(&req->rq_lock);
1654                 req->rq_waiting = 1;
1655                 spin_unlock(&req->rq_lock);
1656
1657                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1658                           ptlrpc_import_state_name(req->rq_send_state),
1659                           ptlrpc_import_state_name(imp->imp_state));
1660                 LASSERT(list_empty(&req->rq_list));
1661                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1662                 atomic_inc(&req->rq_import->imp_inflight);
1663                 spin_unlock(&imp->imp_lock);
1664                 RETURN(0);
1665         }
1666
1667         if (rc != 0) {
1668                 spin_unlock(&imp->imp_lock);
1669                 req->rq_status = rc;
1670                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1671                 RETURN(rc);
1672         }
1673
1674         LASSERT(list_empty(&req->rq_list));
1675         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1676         atomic_inc(&req->rq_import->imp_inflight);
1677
1678         /*
1679          * Find the known replied XID from the unreplied list. CONNECT
1680          * and DISCONNECT requests are skipped to keep the sanity check
1681          * on the server side happy; see process_req_last_xid().
1682          *
1683          * For CONNECT: Because replay requests have lower XIDs, it would
1684          * break the sanity check if CONNECT bumped the exp_last_xid on
1685          * the server.
1686          *
1687          * For DISCONNECT: Since the client aborts inflight RPCs before
1688          * sending DISCONNECT, DISCONNECT may carry an XID higher than
1689          * those of the inflight RPCs.
1690          */
1691         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1692                 min_xid = ptlrpc_known_replied_xid(imp);
1693         spin_unlock(&imp->imp_lock);
1694
1695         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1696
1697         lustre_msg_set_status(req->rq_reqmsg, current->pid);
1698
1699         /* If the request to be sent is an LDLM callback, do not try to
1700          * refresh context.
1701          * An LDLM callback is sent by a server to a client in order to make
1702          * it release a lock, on a communication channel that uses a reverse
1703          * context. It cannot be refreshed on its own, as it is the 'reverse'
1704          * (server-side) representation of a client context.
1705          * We do not care if the reverse context is expired, and want to send
1706          * the LDLM callback anyway. Once the client receives the AST, it is
1707          * its job to refresh its own context if it has expired, hence
1708          * refreshing the associated reverse context on server side, before
1709          * being able to send the LDLM_CANCEL requested by the server.
1710          */
1711         if (lustre_msg_get_opc(req->rq_reqmsg) != LDLM_BL_CALLBACK &&
1712             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_CP_CALLBACK &&
1713             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_GL_CALLBACK)
1714                 rc = sptlrpc_req_refresh_ctx(req, 0);
1715         if (rc) {
1716                 if (req->rq_err) {
1717                         req->rq_status = rc;
1718                         RETURN(1);
1719                 } else {
1720                         spin_lock(&req->rq_lock);
1721                         req->rq_wait_ctx = 1;
1722                         spin_unlock(&req->rq_lock);
1723                         RETURN(0);
1724                 }
1725         }
1726
1727         CDEBUG(D_RPCTRACE,
1728                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1729                req, current->comm,
1730                imp->imp_obd->obd_uuid.uuid,
1731                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1732                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1733                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1734
1735         rc = ptl_send_rpc(req, 0);
1736         if (rc == -ENOMEM) {
1737                 spin_lock(&imp->imp_lock);
1738                 if (!list_empty(&req->rq_list)) {
1739                         list_del_init(&req->rq_list);
1740                         if (atomic_dec_and_test(&req->rq_import->imp_inflight))
1741                                 wake_up(&req->rq_import->imp_recovery_waitq);
1742                 }
1743                 spin_unlock(&imp->imp_lock);
1744                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1745                 RETURN(rc);
1746         }
1747         if (rc) {
1748                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1749                           rc);
1750                 spin_lock(&req->rq_lock);
1751                 req->rq_net_err = 1;
1752                 spin_unlock(&req->rq_lock);
1753                 RETURN(rc);
1754         }
1755         RETURN(0);
1756 }
1757
1758 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1759 {
1760         int remaining, rc;
1761
1762         ENTRY;
1763         LASSERT(set->set_producer != NULL);
1764
1765         remaining = atomic_read(&set->set_remaining);
1766
1767         /*
1768          * populate the ->set_requests list with requests until we
1769          * reach the maximum number of RPCs in flight for this set
1770          */
1771         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1772                 rc = set->set_producer(set, set->set_producer_arg);
1773                 if (rc == -ENOENT) {
1774                         /* no more RPC to produce */
1775                         set->set_producer     = NULL;
1776                         set->set_producer_arg = NULL;
1777                         RETURN(0);
1778                 }
1779         }
1780
1781         RETURN((atomic_read(&set->set_remaining) - remaining));
1782 }
1783
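/*
 * Sketch of a producer callback (editor's example, not part of the original
 * source). "example_build_req" is a hypothetical helper returning a prepared
 * request, or NULL once the caller's work list is exhausted; returning
 * -ENOENT tells ptlrpc_set_producer() that nothing is left to produce:
 *
 *	static int example_producer(struct ptlrpc_request_set *set, void *arg)
 *	{
 *		struct ptlrpc_request *req = example_build_req(arg);
 *
 *		if (req == NULL)
 *			return -ENOENT;	// no more RPCs to produce
 *		ptlrpc_set_add_req(set, req);
 *		return 0;
 *	}
 */
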
1784 /**
1785  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1786  * and no more replies are expected.
1787  * (It is possible to get fewer replies than requests sent, e.g. due to
1788  * timed-out requests or requests that we had trouble sending out.)
1789  *
1790  * NOTE: This function contains a potential schedule point (cond_resched()).
1791  */
1792 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1793 {
1794         struct ptlrpc_request *req, *next;
1795         LIST_HEAD(comp_reqs);
1796         int force_timer_recalc = 0;
1797
1798         ENTRY;
1799         if (atomic_read(&set->set_remaining) == 0)
1800                 RETURN(1);
1801
1802         list_for_each_entry_safe(req, next, &set->set_requests,
1803                                  rq_set_chain) {
1804                 struct obd_import *imp = req->rq_import;
1805                 int unregistered = 0;
1806                 int async = 1;
1807                 int rc = 0;
1808
1809                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1810                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1811                         continue;
1812                 }
1813
1814                 /*
1815                  * This schedule point is mainly for the ptlrpcd caller of this
1816                  * function.  Most ptlrpc sets are neither long-lived nor
1817                  * unbounded in length, but the set used by ptlrpcd is.
1818                  * Since the processing time is unbounded, we need to insert an
1819                  * explicit schedule point to make the thread well-behaved.
1820                  */
1821                 cond_resched();
1822
1823                 /*
1824                  * If the caller allows the request to be interrupted and
1825                  * it really has been interrupted, then move the request
1826                  * to the RQ_PHASE_INTERPRET phase regardless of what the
1827                  * current phase is.
1828                  */
1829                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1830                         req->rq_status = -EINTR;
1831                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1832
1833                         /*
1834                          * Since it is being interpreted and we have to
1835                          * wait for the reply to be unlinked, use sync mode.
1836                          */
1837                         async = 0;
1838
1839                         GOTO(interpret, req->rq_status);
1840                 }
1841
1842                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1843                         force_timer_recalc = 1;
1844
1845                 /* delayed send - skip */
1846                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1847                         continue;
1848
1849                 /* delayed resend - skip */
1850                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1851                     req->rq_sent > ktime_get_real_seconds())
1852                         continue;
1853
1854                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1855                       req->rq_phase == RQ_PHASE_BULK ||
1856                       req->rq_phase == RQ_PHASE_INTERPRET ||
1857                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1858                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1859                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1860                         LBUG();
1861                 }
1862
1863                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1864                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1865                         LASSERT(req->rq_next_phase != req->rq_phase);
1866                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1867
1868                         if (req->rq_req_deadline &&
1869                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1870                                 req->rq_req_deadline = 0;
1871                         if (req->rq_reply_deadline &&
1872                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1873                                 req->rq_reply_deadline = 0;
1874                         if (req->rq_bulk_deadline &&
1875                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1876                                 req->rq_bulk_deadline = 0;
1877
1878                         /*
1879                          * Skip processing until the reply is unlinked.
1880                          * We can't return the request to the pool or call
1881                          * interpret before that. We need to make sure
1882                          * that all RDMA transfers have finished and will
1883                          * not corrupt any data.
1884                          */
1885                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1886                             ptlrpc_cli_wait_unlink(req))
1887                                 continue;
1888                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1889                             ptlrpc_client_bulk_active(req))
1890                                 continue;
1891
1892                         /*
1893                          * Turn fail_loc off to prevent it from looping
1894                          * forever.
1895                          */
1896                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1897                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1898                                                      OBD_FAIL_ONCE);
1899                         }
1900                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1901                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1902                                                      OBD_FAIL_ONCE);
1903                         }
1904
1905                         /*
1906                          * Move to next phase if reply was successfully
1907                          * unlinked.
1908                          */
1909                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1910                 }
1911
1912                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1913                         GOTO(interpret, req->rq_status);
1914
1915                 /*
1916                  * Note that this also will start async reply unlink.
1917                  */
1918                 if (req->rq_net_err && !req->rq_timedout) {
1919                         ptlrpc_expire_one_request(req, 1);
1920
1921                         /*
1922                          * Check if we still need to wait for unlink.
1923                          */
1924                         if (ptlrpc_cli_wait_unlink(req) ||
1925                             ptlrpc_client_bulk_active(req))
1926                                 continue;
1927                         /* If resend is not allowed, fail it now. */
1928                         if (req->rq_no_resend) {
1929                                 if (req->rq_status == 0)
1930                                         req->rq_status = -EIO;
1931                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1932                                 GOTO(interpret, req->rq_status);
1933                         } else {
1934                                 continue;
1935                         }
1936                 }
1937
1938                 if (req->rq_err) {
1939                         spin_lock(&req->rq_lock);
1940                         req->rq_replied = 0;
1941                         spin_unlock(&req->rq_lock);
1942                         if (req->rq_status == 0)
1943                                 req->rq_status = -EIO;
1944                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1945                         GOTO(interpret, req->rq_status);
1946                 }
1947
1948                 /*
1949                  * ptlrpc_set_wait uses l_wait_event_abortable_timeout()
1950                  * so it sets rq_intr regardless of individual rpc
1951                  * timeouts. The synchronous IO waiting path sets
1952                  * rq_intr irrespective of whether ptlrpcd
1953                  * has seen a timeout.  Our policy is to only interpret
1954                  * interrupted rpcs after they have timed out, so we
1955                  * need to enforce that here.
1956                  */
1957
1958                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1959                                      req->rq_wait_ctx)) {
1960                         req->rq_status = -EINTR;
1961                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1962                         GOTO(interpret, req->rq_status);
1963                 }
1964
1965                 if (req->rq_phase == RQ_PHASE_RPC) {
1966                         if (req->rq_timedout || req->rq_resend ||
1967                             req->rq_waiting || req->rq_wait_ctx) {
1968                                 int status;
1969
1970                                 if (!ptlrpc_unregister_reply(req, 1)) {
1971                                         ptlrpc_unregister_bulk(req, 1);
1972                                         continue;
1973                                 }
1974
1975                                 spin_lock(&imp->imp_lock);
1976                                 if (ptlrpc_import_delay_req(imp, req,
1977                                                             &status)) {
1978                                         /*
1979                                          * Put on the delayed list - only while
1980                                          * waiting for recovery to finish - before sending.
1981                                          */
1982                                         list_move_tail(&req->rq_list,
1983                                                        &imp->imp_delayed_list);
1984                                         spin_unlock(&imp->imp_lock);
1985                                         continue;
1986                                 }
1987
1988                                 if (status != 0)  {
1989                                         req->rq_status = status;
1990                                         ptlrpc_rqphase_move(req,
1991                                                             RQ_PHASE_INTERPRET);
1992                                         spin_unlock(&imp->imp_lock);
1993                                         GOTO(interpret, req->rq_status);
1994                                 }
1995                                 /* ignore on just initiated connections */
1996                                 if (ptlrpc_no_resend(req) &&
1997                                     !req->rq_wait_ctx &&
1998                                     imp->imp_generation !=
1999                                     imp->imp_initiated_at) {
2000                                         req->rq_status = -ENOTCONN;
2001                                         ptlrpc_rqphase_move(req,
2002                                                             RQ_PHASE_INTERPRET);
2003                                         spin_unlock(&imp->imp_lock);
2004                                         GOTO(interpret, req->rq_status);
2005                                 }
2006
2007                                 /* don't resend too fast in case of network
2008                                  * errors.
2009                                  */
2010                                 if (ktime_get_real_seconds() < req->rq_sent + 1 &&
2011                                     req->rq_net_err && req->rq_timedout) {
2012
2013                                         DEBUG_REQ(D_INFO, req,
2014                                                   "throttle request");
2015                                         /* Don't try to resend RPC right away
2016                                          * as it is likely it will fail again
2017                                          * and ptlrpc_check_set() will be
2018                                          * called again, keeping this thread
2019                                          * busy. Instead, wait for the next
2020                                          * timeout. Flag it as resend to
2021                                          * ensure we don't wait too long.
2022                                          */
2023                                         req->rq_resend = 1;
2024                                         spin_unlock(&imp->imp_lock);
2025                                         continue;
2026                                 }
2027
2028                                 list_move_tail(&req->rq_list,
2029                                                &imp->imp_sending_list);
2030
2031                                 spin_unlock(&imp->imp_lock);
2032
2033                                 spin_lock(&req->rq_lock);
2034                                 req->rq_waiting = 0;
2035                                 spin_unlock(&req->rq_lock);
2036
2037                                 if (req->rq_timedout || req->rq_resend) {
2038                                         /*
2039                                          * This is re-sending anyway,
2040                                          * so let's mark the req as resend.
2041                                          */
2042                                         spin_lock(&req->rq_lock);
2043                                         req->rq_resend = 1;
2044                                         spin_unlock(&req->rq_lock);
2045                                 }
2046                                 /*
2047                                  * rq_wait_ctx is only touched by ptlrpcd,
2048                                  * so no lock is needed here.
2049                                  */
2050                                 status = sptlrpc_req_refresh_ctx(req, 0);
2051                                 if (status) {
2052                                         if (req->rq_err) {
2053                                                 req->rq_status = status;
2054                                                 spin_lock(&req->rq_lock);
2055                                                 req->rq_wait_ctx = 0;
2056                                                 spin_unlock(&req->rq_lock);
2057                                                 force_timer_recalc = 1;
2058                                         } else {
2059                                                 spin_lock(&req->rq_lock);
2060                                                 req->rq_wait_ctx = 1;
2061                                                 spin_unlock(&req->rq_lock);
2062                                         }
2063
2064                                         continue;
2065                                 } else {
2066                                         spin_lock(&req->rq_lock);
2067                                         req->rq_wait_ctx = 0;
2068                                         spin_unlock(&req->rq_lock);
2069                                 }
2070
2071                                 /*
2072                                  * In any case, the previous bulk should be
2073                                  * cleaned up in preparation for the new send.
2074                                  */
2075                                 if (req->rq_bulk &&
2076                                     !ptlrpc_unregister_bulk(req, 1))
2077                                         continue;
2078
2079                                 rc = ptl_send_rpc(req, 0);
2080                                 if (rc == -ENOMEM) {
2081                                         spin_lock(&imp->imp_lock);
2082                                         if (!list_empty(&req->rq_list))
2083                                                 list_del_init(&req->rq_list);
2084                                         spin_unlock(&imp->imp_lock);
2085                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2086                                         continue;
2087                                 }
2088                                 if (rc) {
2089                                         DEBUG_REQ(D_HA, req,
2090                                                   "send failed: rc = %d", rc);
2091                                         force_timer_recalc = 1;
2092                                         spin_lock(&req->rq_lock);
2093                                         req->rq_net_err = 1;
2094                                         spin_unlock(&req->rq_lock);
2095                                         continue;
2096                                 }
2097                                 /* need to reset the timeout */
2098                                 force_timer_recalc = 1;
2099                         }
2100
2101                         spin_lock(&req->rq_lock);
2102
2103                         if (ptlrpc_client_early(req)) {
2104                                 ptlrpc_at_recv_early_reply(req);
2105                                 spin_unlock(&req->rq_lock);
2106                                 continue;
2107                         }
2108
2109                         /* Still waiting for a reply? */
2110                         if (ptlrpc_client_recv(req)) {
2111                                 spin_unlock(&req->rq_lock);
2112                                 continue;
2113                         }
2114
2115                         /* Did we actually receive a reply? */
2116                         if (!ptlrpc_client_replied(req)) {
2117                                 spin_unlock(&req->rq_lock);
2118                                 continue;
2119                         }
2120
2121                         spin_unlock(&req->rq_lock);
2122
2123                         /*
2124                          * Unlink from the net because we are going to
2125                          * swab the reply buffer in place.
2126                          */
2127                         unregistered = ptlrpc_unregister_reply(req, 1);
2128                         if (!unregistered)
2129                                 continue;
2130
2131                         req->rq_status = after_reply(req);
2132                         if (req->rq_resend)
2133                                 continue;
2134
2135                         /*
2136                          * If there is no bulk associated with this request,
2137                          * then we're done and should let the interpreter
2138                          * process the reply. Similarly if the RPC returned
2139                          * an error, and therefore the bulk will never arrive.
2140                          */
2141                         if (!req->rq_bulk || req->rq_status < 0) {
2142                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2143                                 GOTO(interpret, req->rq_status);
2144                         }
2145
2146                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2147                 }
2148
2149                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2150                 if (ptlrpc_client_bulk_active(req))
2151                         continue;
2152
2153                 if (req->rq_bulk->bd_failure) {
2154                         /*
2155                          * The RPC reply arrived OK, but the bulk screwed
2156                          * up!  Dead weird since the server told us the RPC
2157                          * was good after getting the REPLY for her GET or
2158                          * the ACK for her PUT.
2159                          */
2160                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
2161                         req->rq_status = -EIO;
2162                 }
2163
2164                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2165
2166 interpret:
2167                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2168
2169                 /*
2170                  * This moves to the "unregistering" phase: we need to
2171                  * wait for the reply to be unlinked.
2172                  */
2173                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2174                         /* start async bulk unlink too */
2175                         ptlrpc_unregister_bulk(req, 1);
2176                         continue;
2177                 }
2178
2179                 if (!ptlrpc_unregister_bulk(req, async))
2180                         continue;
2181
2182                 /*
2183                  * By the time interpret is called, receiving should
2184                  * already be finished.
2185                  */
2186                 LASSERT(!req->rq_receiving_reply);
2187
2188                 ptlrpc_req_interpret(env, req, req->rq_status);
2189
2190                 if (ptlrpcd_check_work(req)) {
2191                         atomic_dec(&set->set_remaining);
2192                         continue;
2193                 }
2194                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2195
2196                 if (req->rq_reqmsg)
2197                         CDEBUG(D_RPCTRACE,
2198                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2199                                req, current->comm,
2200                                imp->imp_obd->obd_uuid.uuid,
2201                                lustre_msg_get_status(req->rq_reqmsg),
2202                                req->rq_xid,
2203                                obd_import_nid2str(imp),
2204                                lustre_msg_get_opc(req->rq_reqmsg),
2205                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2206
2207                 spin_lock(&imp->imp_lock);
2208                 /*
2209                  * The request may no longer be on the sending or delayed
2210                  * list. This happens when the request is marked erroneous
2211                  * because ptlrpc_import_delay_req(req, &status) found it
2212                  * impossible to allow sending this RPC and set *status != 0.
2213                  */
2214                 if (!list_empty(&req->rq_list)) {
2215                         list_del_init(&req->rq_list);
2216                         if (atomic_dec_and_test(&imp->imp_inflight))
2217                                 wake_up(&imp->imp_recovery_waitq);
2218                 }
2219                 list_del_init(&req->rq_unreplied_list);
2220                 spin_unlock(&imp->imp_lock);
2221
2222                 atomic_dec(&set->set_remaining);
2223                 wake_up(&imp->imp_recovery_waitq);
2224
2225                 if (set->set_producer) {
2226                         /* produce a new request if possible */
2227                         if (ptlrpc_set_producer(set) > 0)
2228                                 force_timer_recalc = 1;
2229
2230                         /*
2231                          * free the request that has just been completed
2232                          * in order not to pollute set->set_requests
2233                          */
2234                         list_del_init(&req->rq_set_chain);
2235                         spin_lock(&req->rq_lock);
2236                         req->rq_set = NULL;
2237                         req->rq_invalid_rqset = 0;
2238                         spin_unlock(&req->rq_lock);
2239
2240                         /* record rq_status to compute the final status later */
2241                         if (req->rq_status != 0)
2242                                 set->set_rc = req->rq_status;
2243                         ptlrpc_req_finished(req);
2244                 } else {
2245                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2246                 }
2247         }
2248
2249         /*
2250          * Move completed requests to the head of the list so it is
2251          * easier for the caller to find them.
2252          */
2253         list_splice(&comp_reqs, &set->set_requests);
2254
2255         /* If we hit an error, we want to recover promptly. */
2256         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2257 }
2258 EXPORT_SYMBOL(ptlrpc_check_set);
2259
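/*
 * Usage sketch (editor's example, not part of the original source): callers
 * typically poll ptlrpc_check_set() from a wait loop until it returns 1,
 * roughly:
 *
 *	while (!ptlrpc_check_set(NULL, set))
 *		wait_event_idle_timeout(set->set_waitq,
 *					ptlrpc_check_set(NULL, set),
 *					cfs_time_seconds(1));
 *
 * ptlrpc_set_wait() below implements the full version of this loop, with
 * timeout and signal handling.
 */
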
2260 /**
2261  * Time out request \a req. If \a async_unlink is set, do not wait
2262  * until LNet actually confirms the network buffer unlinking.
2263  * Returns 1 if we should give up further retry attempts, 0 otherwise.
2264  */
2265 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2266 {
2267         struct obd_import *imp = req->rq_import;
2268         unsigned int debug_mask = D_RPCTRACE;
2269         int rc = 0;
2270
2271         ENTRY;
2272         spin_lock(&req->rq_lock);
2273         req->rq_timedout = 1;
2274         spin_unlock(&req->rq_lock);
2275
2276         if (ptlrpc_console_allow(req, lustre_msg_get_opc(req->rq_reqmsg),
2277                                  lustre_msg_get_status(req->rq_reqmsg)))
2278                 debug_mask = D_WARNING;
2279         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2280                   req->rq_net_err ? "failed due to network error" :
2281                      ((req->rq_real_sent == 0 ||
2282                        req->rq_real_sent < req->rq_sent ||
2283                        req->rq_real_sent >= req->rq_deadline) ?
2284                       "timed out for sent delay" : "timed out for slow reply"),
2285                   req->rq_sent, req->rq_real_sent);
2286
2287         if (imp && obd_debug_peer_on_timeout)
2288                 LNetDebugPeer(imp->imp_connection->c_peer);
2289
2290         ptlrpc_unregister_reply(req, async_unlink);
2291         ptlrpc_unregister_bulk(req, async_unlink);
2292
2293         if (obd_dump_on_timeout)
2294                 libcfs_debug_dumplog();
2295
2296         if (!imp) {
2297                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2298                 RETURN(1);
2299         }
2300
2301         atomic_inc(&imp->imp_timeouts);
2302
2303         /* The DLM server doesn't want recovery run on its imports. */
2304         if (imp->imp_dlm_fake)
2305                 RETURN(1);
2306
2307         /*
2308          * If this request is for recovery or other primordial tasks,
2309          * then error it out here.
2310          */
2311         if (req->rq_ctx_init || req->rq_ctx_fini ||
2312             req->rq_send_state != LUSTRE_IMP_FULL ||
2313             imp->imp_obd->obd_no_recov) {
2314                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2315                           ptlrpc_import_state_name(req->rq_send_state),
2316                           ptlrpc_import_state_name(imp->imp_state));
2317                 spin_lock(&req->rq_lock);
2318                 req->rq_status = -ETIMEDOUT;
2319                 req->rq_err = 1;
2320                 spin_unlock(&req->rq_lock);
2321                 RETURN(1);
2322         }
2323
2324         /*
2325          * if a request can't be resent we can't wait for an answer after
2326          * the timeout
2327          */
2328         if (ptlrpc_no_resend(req)) {
2329                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2330                 rc = 1;
2331         }
2332
2333         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
2334
2335         RETURN(rc);
2336 }
2337
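/*
 * Illustrative note (editor's sketch, not part of the original source): the
 * return value steers the caller. A resendable request on a healthy import
 * returns 0 after kicking ptlrpc_fail_import(), so recovery and a resend
 * follow; a no-resend request, or one sent outside LUSTRE_IMP_FULL, returns
 * 1 and the caller gives up waiting for an answer.
 */
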
2338 /**
2339  * Time out all uncompleted requests in request set \a set.
2340  * This is called when a wait times out.
2341  */
2342 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
2343 {
2344         struct ptlrpc_request *req;
2345         time64_t now = ktime_get_real_seconds();
2346
2347         ENTRY;
2348         LASSERT(set != NULL);
2349
2350         /*
2351          * A timeout expired. See which reqs it applies to...
2352          */
2353         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2354                 /* don't expire request waiting for context */
2355                 if (req->rq_wait_ctx)
2356                         continue;
2357
2358                 /* Request in-flight? */
2359                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2360                        !req->rq_waiting && !req->rq_resend) ||
2361                       (req->rq_phase == RQ_PHASE_BULK)))
2362                         continue;
2363
2364                 if (req->rq_timedout ||     /* already dealt with */
2365                     req->rq_deadline > now) /* not expired */
2366                         continue;
2367
2368                 /*
2369                  * Deal with this guy. Do it asynchronously to not block
2370                  * ptlrpcd thread.
2371                  */
2372                 ptlrpc_expire_one_request(req, 1);
2373                 /*
2374                  * Loops require that we resched once in a while to avoid
2375                  * RCU stalls and a few other problems.
2376                  */
2377                 cond_resched();
2378
2379         }
2380 }
2381
2382 /**
2383  * Interrupts (sets interrupted flag) all uncompleted requests in
2384  * set \a set. This is called when a wait_event is interrupted
2385  * by a signal.
2386  */
2387 static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
2388 {
2389         struct ptlrpc_request *req;
2390
2391         LASSERT(set != NULL);
2392         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2393
2394         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2395                 if (req->rq_intr)
2396                         continue;
2397
2398                 if (req->rq_phase != RQ_PHASE_RPC &&
2399                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2400                     !req->rq_allow_intr)
2401                         continue;
2402
2403                 spin_lock(&req->rq_lock);
2404                 req->rq_intr = 1;
2405                 spin_unlock(&req->rq_lock);
2406         }
2407 }
2408
2409 /**
2410  * Get the smallest timeout in the set; this does NOT set a timeout.
2411  */
2412 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2413 {
2414         time64_t now = ktime_get_real_seconds();
2415         int timeout = 0;
2416         struct ptlrpc_request *req;
2417         time64_t deadline;
2418
2419         ENTRY;
2420         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2421                 /* Request in-flight? */
2422                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2423                       (req->rq_phase == RQ_PHASE_BULK) ||
2424                       (req->rq_phase == RQ_PHASE_NEW)))
2425                         continue;
2426
2427                 /* Already timed out. */
2428                 if (req->rq_timedout)
2429                         continue;
2430
2431                 /* Waiting for ctx. */
2432                 if (req->rq_wait_ctx)
2433                         continue;
2434
2435                 if (req->rq_phase == RQ_PHASE_NEW)
2436                         deadline = req->rq_sent;
2437                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2438                         deadline = req->rq_sent;
2439                 else
2440                         deadline = req->rq_sent + req->rq_timeout;
2441
2442                 if (deadline <= now)    /* actually expired already */
2443                         timeout = 1;    /* ASAP */
2444                 else if (timeout == 0 || timeout > deadline - now)
2445                         timeout = deadline - now;
2446         }
2447         RETURN(timeout);
2448 }
2449
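/*
 * Worked example (editor's sketch): with now == 110, a request sent at
 * t == 100 with rq_timeout == 30 has deadline 130 and would contribute a
 * 20s timeout; if the set also holds a delayed-send request (RQ_PHASE_NEW)
 * with rq_sent == 105, that deadline has already passed, so 1 is returned
 * (recheck ASAP).
 */
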
2450 /**
2451  * Send all unsent requests from the set and then wait until all
2452  * requests in the set complete (either get a reply, time out, get an
2453  * error, or otherwise are interrupted).
2454  * Returns 0 on success, or an error code otherwise.
2455  */
2456 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2457 {
2458         struct ptlrpc_request *req;
2459         time64_t timeout;
2460         int rc;
2461
2462         ENTRY;
2463         if (set->set_producer)
2464                 (void)ptlrpc_set_producer(set);
2465         else
2466                 list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2467                         if (req->rq_phase == RQ_PHASE_NEW)
2468                                 (void)ptlrpc_send_new_req(req);
2469                 }
2470
2471         if (list_empty(&set->set_requests))
2472                 RETURN(0);
2473
2474         do {
2475                 timeout = ptlrpc_set_next_timeout(set);
2476
2477                 /*
2478                  * wait until all complete, interrupted, or an in-flight
2479                  * req times out
2480                  */
2481                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2482                        set, timeout);
2483
2484                 if ((timeout == 0 && !signal_pending(current)) ||
2485                     set->set_allow_intr) {
2486                         /*
2487                          * No requests are in-flight (either timed out
2488                          * or delayed), so we can allow interrupts.
2489                          * We still want to block for a limited time,
2490                          * so we allow interrupts during the timeout.
2491                          */
2492                         rc = l_wait_event_abortable_timeout(
2493                                 set->set_waitq,
2494                                 ptlrpc_check_set(NULL, set),
2495                                 cfs_time_seconds(timeout ? timeout : 1));
2496                         if (rc == 0) {
2497                                 rc = -ETIMEDOUT;
2498                                 ptlrpc_expired_set(set);
2499                         } else if (rc < 0) {
2500                                 rc = -EINTR;
2501                                 ptlrpc_interrupted_set(set);
2502                         } else {
2503                                 rc = 0;
2504                         }
2505                 } else {
2506                         /*
2507                          * At least one request is in flight, so no
2508                          * interrupts are allowed. Wait until all
2509                          * complete, or an in-flight req times out.
2510                          */
2511                         rc = wait_event_idle_timeout(
2512                                 set->set_waitq,
2513                                 ptlrpc_check_set(NULL, set),
2514                                 cfs_time_seconds(timeout ? timeout : 1));
2515                         if (rc == 0) {
2516                                 ptlrpc_expired_set(set);
2517                                 rc = -ETIMEDOUT;
2518                         } else {
2519                                 rc = 0;
2520                         }
2521
2522                         /*
2523                          * LU-769 - if we ignored the signal because
2524                          * it was already pending when we started, we
2525                          * need to handle it now or we risk it being
2526                          * ignored forever
2527                          */
2528                         if (rc == -ETIMEDOUT &&
2529                             signal_pending(current)) {
2530                                 sigset_t old, new;
2531
2532                                 siginitset(&new, LUSTRE_FATAL_SIGS);
2533                                 sigprocmask(SIG_BLOCK, &new, &old);
2534                                 /*
2535                                  * In fact we only interrupt for the
2536                                  * "fatal" signals like SIGINT or
2537                                  * SIGKILL. We still ignore less
2538                                  * important signals since the ptlrpc set
2539                                  * is not easily reentrant from
2540                                  * userspace.
2541                                  */
2542                                 if (signal_pending(current))
2543                                         ptlrpc_interrupted_set(set);
2544                                 sigprocmask(SIG_SETMASK, &old, NULL);
2545                         }
2546                 }
2547
2548                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2549
2550                 /*
2551                  * -EINTR => all requests have been flagged rq_intr so next
2552                  * check completes.
2553                  * -ETIMEDOUT => someone timed out.  When all reqs have
2554                  * timed out, signals are enabled allowing completion with
2555                  * EINTR.
2556                  * I don't really care if we go once more round the loop in
2557                  * the error cases -eeb.
2558                  */
2559                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2560                         list_for_each_entry(req, &set->set_requests,
2561                                             rq_set_chain) {
2562                                 spin_lock(&req->rq_lock);
2563                                 req->rq_invalid_rqset = 1;
2564                                 spin_unlock(&req->rq_lock);
2565                         }
2566                 }
2567         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2568
2569         LASSERT(atomic_read(&set->set_remaining) == 0);
2570
2571         rc = set->set_rc; /* rq_status of already freed requests if any */
2572         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2573                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2574                 if (req->rq_status != 0)
2575                         rc = req->rq_status;
2576         }
2577
2578         RETURN(rc);
2579 }
2580 EXPORT_SYMBOL(ptlrpc_set_wait);
2581
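/*
 * Usage sketch (editor's example, not part of the original source): the
 * common synchronous pattern built on ptlrpc_set_wait(), similar to what
 * ptlrpc_queue_wait() does ("req" is an already-prepared request):
 *
 *	struct ptlrpc_request_set *set;
 *	int rc;
 *
 *	set = ptlrpc_prep_set();
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req);
 *	rc = ptlrpc_set_wait(NULL, set);
 *	ptlrpc_set_destroy(set);
 */
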
2582 /**
2583  * Helper function for request freeing.
2584  * Called when the request refcount reaches zero and the request needs to
2585  * be freed. Removes the request from any sending/replay lists it might be
2586  * on and frees network buffers if any are present.
2587  * If \a locked is set, the caller already holds the import's imp_lock, so
2588  * we no longer need to take it (for certain list manipulations).
2589  */
2590 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2591 {
2592         ENTRY;
2593
2594         if (!request)
2595                 RETURN_EXIT;
2596
2597         LASSERT(!request->rq_srv_req);
2598         LASSERT(request->rq_export == NULL);
2599         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2600         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
2601         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
2602         LASSERTF(!request->rq_replay, "req %p\n", request);
2603
2604         req_capsule_fini(&request->rq_pill);
2605
2606         /*
2607          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2608          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2609          */
2610         if (request->rq_import) {
2611                 if (!locked)
2612                         spin_lock(&request->rq_import->imp_lock);
2613                 list_del_init(&request->rq_replay_list);
2614                 list_del_init(&request->rq_unreplied_list);
2615                 if (!locked)
2616                         spin_unlock(&request->rq_import->imp_lock);
2617         }
2618         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
2619
2620         if (atomic_read(&request->rq_refcount) != 0) {
2621                 DEBUG_REQ(D_ERROR, request,
2622                           "freeing request with nonzero refcount");
2623                 LBUG();
2624         }
2625
2626         if (request->rq_repbuf)
2627                 sptlrpc_cli_free_repbuf(request);
2628
2629         if (request->rq_import) {
2630                 if (!ptlrpcd_check_work(request)) {
2631                         LASSERT(atomic_read(&request->rq_import->imp_reqs) > 0);
2632                         atomic_dec(&request->rq_import->imp_reqs);
2633                 }
2634                 class_import_put(request->rq_import);
2635                 request->rq_import = NULL;
2636         }
2637         if (request->rq_bulk)
2638                 ptlrpc_free_bulk(request->rq_bulk);
2639
2640         if (request->rq_reqbuf || request->rq_clrbuf)
2641                 sptlrpc_cli_free_reqbuf(request);
2642
2643         if (request->rq_cli_ctx)
2644                 sptlrpc_req_put_ctx(request, !locked);
2645
2646         if (request->rq_pool)
2647                 __ptlrpc_free_req_to_pool(request);
2648         else
2649                 ptlrpc_request_cache_free(request);
2650         EXIT;
2651 }
2652
2653 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2654 /**
2655  * Drop one request reference. Must be called with import imp_lock held.
2656  * When reference count drops to zero, request is freed.
2657  */
2658 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2659 {
2660         assert_spin_locked(&request->rq_import->imp_lock);
2661         (void)__ptlrpc_req_finished(request, 1);
2662 }
2663
2664 /**
2665  * Helper function
2666  * Drops one reference count for request \a request.
2667  * \a locked set indicates that the caller holds the import's imp_lock.
2668  * Frees the request when the reference count reaches zero.
2669  *
2670  * \retval 1    the request is freed
2671  * \retval 0    some others still hold references on the request
2672  */
2673 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2674 {
2675         int count;
2676
2677         ENTRY;
2678         if (!request)
2679                 RETURN(1);
2680
2681         LASSERT(request != LP_POISON);
2682         LASSERT(request->rq_reqmsg != LP_POISON);
2683
2684         DEBUG_REQ(D_INFO, request, "refcount now %u",
2685                   atomic_read(&request->rq_refcount) - 1);
2686
2687         spin_lock(&request->rq_lock);
2688         count = atomic_dec_return(&request->rq_refcount);
2689         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2690
2691         /*
2692          * For an open RPC, the client does not know the EA size (LOV, ACL,
2693          * and so on) until the reply arrives, so it has to reserve a very
2694          * large reply buffer. Such a buffer is not released until the RPC is
2695          * freed. Since the open RPC is replayable, we need to keep it on the
2696          * replay list until close. If many files are opened concurrently,
2697          * the client may run out of memory.
2698          *
2699          * In fact, it is unnecessary to keep the reply buffer for open
2700          * replay; the related EAs have already been saved via mdc_save_lovea()
2701          * before coming here. So it is safe to free the reply buffer earlier,
2702          * before releasing the RPC, to avoid client OOM. LU-9514
2703          */
2704         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2705                 spin_lock(&request->rq_early_free_lock);
2706                 sptlrpc_cli_free_repbuf(request);
2707                 request->rq_repbuf = NULL;
2708                 request->rq_repbuf_len = 0;
2709                 request->rq_repdata = NULL;
2710                 request->rq_reqdata_len = 0;
2711                 spin_unlock(&request->rq_early_free_lock);
2712         }
2713         spin_unlock(&request->rq_lock);
2714
2715         if (!count)
2716                 __ptlrpc_free_req(request, locked);
2717
2718         RETURN(!count);
2719 }
2720
2721 /**
2722  * Drops one reference count for a request.
2723  */
2724 void ptlrpc_req_finished(struct ptlrpc_request *request)
2725 {
2726         __ptlrpc_req_finished(request, 0);
2727 }
2728 EXPORT_SYMBOL(ptlrpc_req_finished);
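
/*
 * A minimal usage sketch of the refcounting API above; the helper name
 * example_share_request() is hypothetical and only illustrates the
 * pairing rule: every ptlrpc_request_addref() must be balanced by a
 * ptlrpc_req_finished(), and the request is freed only when the last
 * reference is dropped.
 */
static void example_share_request(struct ptlrpc_request *req)
{
        /* take an extra reference before handing req to another user */
        ptlrpc_request_addref(req);

        /* ... pass req to another context that uses it ... */

        /* drop our reference; __ptlrpc_free_req() runs only once the
         * refcount reaches zero */
        ptlrpc_req_finished(req);
}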
2729
2730 /**
2731  * Returns xid of a \a request
2732  */
2733 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2734 {
2735         return request->rq_xid;
2736 }
2737 EXPORT_SYMBOL(ptlrpc_req_xid);
2738
2739 /**
2740  * Disengage the client's reply buffer from the network
2741  * NB does _NOT_ unregister any client-side bulk.
2742  * IDEMPOTENT, but _not_ safe against concurrent callers.
2743  * The request owner (i.e. the thread doing the I/O) must call...
2744  * Returns 1 when the reply is unlinked or there was nothing to do, 0 if an async unlink is still pending.
2745  */
2746 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2747 {
2748         bool discard = false;
2749         /*
2750          * Might sleep.
2751          */
2752         LASSERT(!in_interrupt());
2753
2754         /* Let's setup deadline for reply unlink. */
2755         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2756             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2757                 request->rq_reply_deadline = ktime_get_real_seconds() +
2758                                              PTLRPC_REQ_LONG_UNLINK;
2759
2760         /*
2761          * Nothing left to do.
2762          */
2763         if (!__ptlrpc_cli_wait_unlink(request, &discard))
2764                 RETURN(1);
2765
2766         LNetMDUnlink(request->rq_reply_md_h);
2767
2768         if (discard) /* Discard the request-out callback */
2769                 __LNetMDUnlink(request->rq_req_md_h, discard);
2770
2771         /*
2772          * Let's check it once again.
2773          */
2774         if (!ptlrpc_cli_wait_unlink(request))
2775                 RETURN(1);
2776
2777         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2778         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2779
2780         /*
2781          * Do not wait for unlink to finish.
2782          */
2783         if (async)
2784                 RETURN(0);
2785
2786         /*
2787          * We have to wait_event_idle_timeout() whatever the result, to get
2788          * a chance to run reply_in_callback(), and to make sure we've
2789          * unlinked before returning a req to the pool.
2790          */
2791         for (;;) {
2792                 wait_queue_head_t *wq = (request->rq_set) ?
2793                                         &request->rq_set->set_waitq :
2794                                         &request->rq_reply_waitq;
2795                 int seconds = PTLRPC_REQ_LONG_UNLINK;
2796                 /*
2797                  * Network access will complete in finite time but the HUGE
2798                  * timeout lets us CWARN for visibility of sluggish NALs
2799                  */
2800                 while (seconds > 0 &&
2801                        wait_event_idle_timeout(
2802                                *wq,
2803                                !ptlrpc_cli_wait_unlink(request),
2804                                cfs_time_seconds(1)) == 0)
2805                         seconds -= 1;
2806                 if (seconds > 0) {
2807                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2808                         RETURN(1);
2809                 }
2810
2811                 DEBUG_REQ(D_WARNING, request,
2812           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2813                           request->rq_receiving_reply,
2814                           request->rq_req_unlinked,
2815                           request->rq_reply_unlinked);
2816         }
2817         RETURN(0);
2818 }
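
/*
 * The loop above shows a pattern worth noting: wait in one-second
 * slices with wait_event_idle_timeout() (so the task stays off the
 * load average) and warn only when the whole budget expires. A
 * stripped-down sketch of that pattern; the helper and its callback
 * are hypothetical, only the wait APIs used above are assumed:
 */
static int example_wait_sliced(wait_queue_head_t *wq, bool (*done)(void *),
                               void *arg, int budget)
{
        /* poll @done once per second until it holds or @budget expires */
        while (budget > 0 &&
               wait_event_idle_timeout(*wq, done(arg),
                                       cfs_time_seconds(1)) == 0)
                budget -= 1;

        return budget > 0; /* 1: condition met, 0: budget exhausted */
}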
2819
2820 static void ptlrpc_free_request(struct ptlrpc_request *req)
2821 {
2822         spin_lock(&req->rq_lock);
2823         req->rq_replay = 0;
2824         spin_unlock(&req->rq_lock);
2825
2826         if (req->rq_commit_cb)
2827                 req->rq_commit_cb(req);
2828         list_del_init(&req->rq_replay_list);
2829
2830         __ptlrpc_req_finished(req, 1);
2831 }
2832
2833 /**
2834  * the request is committed and dropped from the replay list of its import
2835  */
2836 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2837 {
2838         struct obd_import *imp = req->rq_import;
2839
2840         spin_lock(&imp->imp_lock);
2841         if (list_empty(&req->rq_replay_list)) {
2842                 spin_unlock(&imp->imp_lock);
2843                 return;
2844         }
2845
2846         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2847                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2848                         imp->imp_replay_cursor = req->rq_replay_list.next;
2849                 ptlrpc_free_request(req);
2850         }
2851
2852         spin_unlock(&imp->imp_lock);
2853 }
2854 EXPORT_SYMBOL(ptlrpc_request_committed);
2855
2856 /**
2857  * Iterates through the replay_list on the import and prunes
2858  * all requests that have a transno smaller than last_committed for
2859  * the import and do not have rq_replay set.
2860  * Since requests are sorted in transno order, stops at the first
2861  * transno bigger than last_committed.
2862  * Caller must hold imp->imp_lock.
2863  */
2864 void ptlrpc_free_committed(struct obd_import *imp)
2865 {
2866         struct ptlrpc_request *req, *saved;
2867         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2868         bool skip_committed_list = true;
2869
2870         ENTRY;
2871         LASSERT(imp != NULL);
2872         assert_spin_locked(&imp->imp_lock);
2873
2874         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2875             imp->imp_generation == imp->imp_last_generation_checked) {
2876                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2877                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2878                 RETURN_EXIT;
2879         }
2880         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2881                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2882                imp->imp_generation);
2883
2884         if (imp->imp_generation != imp->imp_last_generation_checked ||
2885             imp->imp_last_transno_checked == 0)
2886                 skip_committed_list = false;
2887
2888         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2889         imp->imp_last_generation_checked = imp->imp_generation;
2890
2891         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2892                                  rq_replay_list) {
2893                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2894                 LASSERT(req != last_req);
2895                 last_req = req;
2896
2897                 if (req->rq_transno == 0) {
2898                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2899                         LBUG();
2900                 }
2901                 if (req->rq_import_generation < imp->imp_generation) {
2902                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2903                         GOTO(free_req, 0);
2904                 }
2905
2906                 /* not yet committed */
2907                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2908                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2909                         break;
2910                 }
2911
2912                 if (req->rq_replay) {
2913                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2914                         list_move_tail(&req->rq_replay_list,
2915                                        &imp->imp_committed_list);
2916                         continue;
2917                 }
2918
2919                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
2920                           imp->imp_peer_committed_transno);
2921 free_req:
2922                 ptlrpc_free_request(req);
2923         }
2924
2925         if (skip_committed_list)
2926                 GOTO(out, 0);
2927
2928         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2929                                  rq_replay_list) {
2930                 LASSERT(req->rq_transno != 0);
2931                 if (req->rq_import_generation < imp->imp_generation ||
2932                     !req->rq_replay) {
2933                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
2934                                   req->rq_import_generation <
2935                                   imp->imp_generation ? "stale" : "closed");
2936
2937                         if (imp->imp_replay_cursor == &req->rq_replay_list)
2938                                 imp->imp_replay_cursor =
2939                                         req->rq_replay_list.next;
2940
2941                         ptlrpc_free_request(req);
2942                 }
2943         }
2944 out:
2945         EXIT;
2946 }
2947
2948 void ptlrpc_cleanup_client(struct obd_import *imp)
2949 {
2950         ENTRY;
2951         EXIT;
2952 }
2953
2954 /**
2955  * Schedule previously sent request for resend.
2956  * For bulk requests we assign new xid (to avoid problems with
2957  * lost replies and therefore several transfers landing into the same
2958  * buffer from different sending attempts).
2959  */
2960 void ptlrpc_resend_req(struct ptlrpc_request *req)
2961 {
2962         DEBUG_REQ(D_HA, req, "going to resend");
2963         spin_lock(&req->rq_lock);
2964
2965         /*
2966          * Request got reply but linked to the import list still.
2967          * Let ptlrpc_check_set() process it.
2968          */
2969         if (ptlrpc_client_replied(req)) {
2970                 spin_unlock(&req->rq_lock);
2971                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2972                 return;
2973         }
2974
2975         req->rq_status = -EAGAIN;
2976
2977         req->rq_resend = 1;
2978         req->rq_net_err = 0;
2979         req->rq_timedout = 0;
2980
2981         ptlrpc_client_wake_req(req);
2982         spin_unlock(&req->rq_lock);
2983 }
2984
2985 /* XXX: this function and rq_status are currently unused */
2986 void ptlrpc_restart_req(struct ptlrpc_request *req)
2987 {
2988         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2989         req->rq_status = -ERESTARTSYS;
2990
2991         spin_lock(&req->rq_lock);
2992         req->rq_restart = 1;
2993         req->rq_timedout = 0;
2994         ptlrpc_client_wake_req(req);
2995         spin_unlock(&req->rq_lock);
2996 }
2997
2998 /**
2999  * Grab additional reference on a request \a req
3000  */
3001 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
3002 {
3003         ENTRY;
3004         atomic_inc(&req->rq_refcount);
3005         RETURN(req);
3006 }
3007 EXPORT_SYMBOL(ptlrpc_request_addref);
3008
3009 /**
3010  * Add a request to import replay_list.
3011  * Must be called under imp_lock
3012  */
3013 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3014                                       struct obd_import *imp)
3015 {
3016         struct ptlrpc_request *iter;
3017
3018         assert_spin_locked(&imp->imp_lock);
3019
3020         if (req->rq_transno == 0) {
3021                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3022                 LBUG();
3023         }
3024
3025         /*
3026          * clear this for new requests that were resent as well
3027          * as resent replayed requests.
3028          */
3029         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3030
3031         /* don't re-add requests that have been replayed */
3032         if (!list_empty(&req->rq_replay_list))
3033                 return;
3034
3035         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3036
3037         spin_lock(&req->rq_lock);
3038         req->rq_resend = 0;
3039         spin_unlock(&req->rq_lock);
3040
3041         LASSERT(imp->imp_replayable);
3042         /* Balanced in ptlrpc_free_committed, usually. */
3043         ptlrpc_request_addref(req);
3044         list_for_each_entry_reverse(iter, &imp->imp_replay_list,
3045                                     rq_replay_list) {
3046                 /*
3047                  * We may have duplicate transnos if we create and then
3048                  * open a file, or for closes retained to match creating
3049                  * opens, so use req->rq_xid as a secondary key.
3050                  * (See bugs 684, 685, and 428.)
3051                  * XXX no longer needed, but all opens need transnos!
3052                  */
3053                 if (iter->rq_transno > req->rq_transno)
3054                         continue;
3055
3056                 if (iter->rq_transno == req->rq_transno) {
3057                         LASSERT(iter->rq_xid != req->rq_xid);
3058                         if (iter->rq_xid > req->rq_xid)
3059                                 continue;
3060                 }
3061
3062                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3063                 return;
3064         }
3065
3066         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3067 }
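
/*
 * A worked example of the reverse-walk insertion above, with made-up
 * (transno, xid) pairs: suppose the replay list holds (5,10) (7,20)
 * (7,40) (9,50) and we retain a request with (7,30). Walking backwards,
 * (9,50) has a larger transno and is skipped; (7,40) ties on transno
 * with a larger xid and is skipped; (7,20) ties with a smaller xid, so
 * the new request is linked right after it, giving the sorted order
 * (5,10) (7,20) (7,30) (7,40) (9,50).
 */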
3068
3069 /**
3070  * Send request and wait until it completes.
3071  * Returns request processing status.
3072  */
3073 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3074 {
3075         struct ptlrpc_request_set *set;
3076         int rc;
3077
3078         ENTRY;
3079         LASSERT(req->rq_set == NULL);
3080         LASSERT(!req->rq_receiving_reply);
3081
3082         set = ptlrpc_prep_set();
3083         if (!set) {
3084                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3085                 RETURN(-ENOMEM);
3086         }
3087
3088         /* for distributed debugging */
3089         lustre_msg_set_status(req->rq_reqmsg, current->pid);
3090
3091         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3092         ptlrpc_request_addref(req);
3093         ptlrpc_set_add_req(set, req);
3094         rc = ptlrpc_set_wait(NULL, set);
3095         ptlrpc_set_destroy(set);
3096
3097         RETURN(rc);
3098 }
3099 EXPORT_SYMBOL(ptlrpc_queue_wait);
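
/*
 * A minimal caller-side sketch for ptlrpc_queue_wait(); the helper is
 * hypothetical and request allocation is elided since it depends on
 * the target format. Note that ptlrpc_queue_wait() adds and drops its
 * own reference for the set, so the caller still owns, and must
 * release, its original reference:
 */
static int example_send_sync(struct ptlrpc_request *req)
{
        int rc;

        rc = ptlrpc_queue_wait(req);
        if (rc != 0)
                CDEBUG(D_INFO, "sync RPC failed: rc = %d\n", rc);

        /* release the caller's reference once the reply is consumed */
        ptlrpc_req_finished(req);
        return rc;
}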
3100
3101 /**
3102  * Callback used for reply processing of replayed requests.
3103  * On a successful reply, calls the registered request replay callback.
3104  * On error, restarts the replay process.
3105  */
3106 static int ptlrpc_replay_interpret(const struct lu_env *env,
3107                                    struct ptlrpc_request *req,
3108                                    void *args, int rc)
3109 {
3110         struct ptlrpc_replay_async_args *aa = args;
3111         struct obd_import *imp = req->rq_import;
3112
3113         ENTRY;
3114         atomic_dec(&imp->imp_replay_inflight);
3115
3116         /*
3117          * Note: if it is bulk replay (MDS-MDS replay), then even if the
3118          * server got the request but the bulk transfer timed out, let's
3119          * replay the bulk req again
3120          */
3121         if (!ptlrpc_client_replied(req) ||
3122             (req->rq_bulk &&
3123              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3124                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3125                 GOTO(out, rc = -ETIMEDOUT);
3126         }
3127
3128         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3129             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3130             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3131                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3132
3133         /** VBR: check version failure */
3134         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3135                 /** replay failed due to a version mismatch */
3136                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3137                 spin_lock(&imp->imp_lock);
3138                 imp->imp_vbr_failed = 1;
3139                 spin_unlock(&imp->imp_lock);
3140                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3141         } else {
3142                 /** The transno had better not change over replay. */
3143                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3144                          lustre_msg_get_transno(req->rq_repmsg) ||
3145                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3146                          "%#llx/%#llx\n",
3147                          lustre_msg_get_transno(req->rq_reqmsg),
3148                          lustre_msg_get_transno(req->rq_repmsg));
3149         }
3150
3151         spin_lock(&imp->imp_lock);
3152         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3153         spin_unlock(&imp->imp_lock);
3154         LASSERT(imp->imp_last_replay_transno);
3155
3156         /* transaction number shouldn't be bigger than the latest replayed */
3157         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3158                 DEBUG_REQ(D_ERROR, req,
3159                           "Reported transno=%llu is bigger than replayed=%llu",
3160                           req->rq_transno,
3161                           lustre_msg_get_transno(req->rq_reqmsg));
3162                 GOTO(out, rc = -EINVAL);
3163         }
3164
3165         DEBUG_REQ(D_HA, req, "got reply");
3166
3167         /* let the callback do fixups, possibly including in the request */
3168         if (req->rq_replay_cb)
3169                 req->rq_replay_cb(req);
3170
3171         if (ptlrpc_client_replied(req) &&
3172             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3173                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3174                           lustre_msg_get_status(req->rq_repmsg),
3175                           aa->praa_old_status);
3176
3177                 /*
3178                  * Note: If the replay fails for MDT-MDT recovery, let's
3179                  * abort all of the following requests in the replay
3180                  * and sending list, because MDT-MDT update requests
3181                  * are dependent on each other, see LU-7039
3182                  */
3183                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3184                         struct ptlrpc_request *free_req;
3185                         struct ptlrpc_request *tmp;
3186
3187                         spin_lock(&imp->imp_lock);
3188                         list_for_each_entry_safe(free_req, tmp,
3189                                                  &imp->imp_replay_list,
3190                                                  rq_replay_list) {
3191                                 ptlrpc_free_request(free_req);
3192                         }
3193
3194                         list_for_each_entry_safe(free_req, tmp,
3195                                                  &imp->imp_committed_list,
3196                                                  rq_replay_list) {
3197                                 ptlrpc_free_request(free_req);
3198                         }
3199
3200                         list_for_each_entry_safe(free_req, tmp,
3201                                                  &imp->imp_delayed_list,
3202                                                  rq_list) {
3203                                 spin_lock(&free_req->rq_lock);
3204                                 free_req->rq_err = 1;
3205                                 free_req->rq_status = -EIO;
3206                                 ptlrpc_client_wake_req(free_req);
3207                                 spin_unlock(&free_req->rq_lock);
3208                         }
3209
3210                         list_for_each_entry_safe(free_req, tmp,
3211                                                  &imp->imp_sending_list,
3212                                                  rq_list) {
3213                                 spin_lock(&free_req->rq_lock);
3214                                 free_req->rq_err = 1;
3215                                 free_req->rq_status = -EIO;
3216                                 ptlrpc_client_wake_req(free_req);
3217                                 spin_unlock(&free_req->rq_lock);
3218                         }
3219                         spin_unlock(&imp->imp_lock);
3220                 }
3221         } else {
3222                 /* Put it back for re-replay. */
3223                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3224         }
3225
3226         /*
3227          * Errors during replay can set transno to 0, but
3228          * imp_last_replay_transno shouldn't be set to 0 anyway
3229          */
3230         if (req->rq_transno == 0)
3231                 CERROR("Transno is 0 during replay!\n");
3232
3233         /* continue with recovery */
3234         rc = ptlrpc_import_recovery_state_machine(imp);
3235  out:
3236         req->rq_send_state = aa->praa_old_state;
3237
3238         if (rc != 0)
3239                 /* this replay failed, so restart recovery */
3240                 ptlrpc_connect_import(imp);
3241
3242         RETURN(rc);
3243 }
3244
3245 /**
3246  * Prepares and queues request for replay.
3247  * Adds it to ptlrpcd queue for actual sending.
3248  * Returns 0 on success.
3249  */
3250 int ptlrpc_replay_req(struct ptlrpc_request *req)
3251 {
3252         struct ptlrpc_replay_async_args *aa;
3253
3254         ENTRY;
3255
3256         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3257
3258         aa = ptlrpc_req_async_args(aa, req);
3259         memset(aa, 0, sizeof(*aa));
3260
3261         /* Prepare request to be resent with ptlrpcd */
3262         aa->praa_old_state = req->rq_send_state;
3263         req->rq_send_state = LUSTRE_IMP_REPLAY;
3264         req->rq_phase = RQ_PHASE_NEW;
3265         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3266         if (req->rq_repmsg)
3267                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3268         req->rq_status = 0;
3269         req->rq_interpret_reply = ptlrpc_replay_interpret;
3270         /* Readjust the timeout for current conditions */
3271         ptlrpc_at_set_req_timeout(req);
3272
3273         /* Tell the server our net_latency so it can calculate how long to wait for the reply. */
3274         lustre_msg_set_service_timeout(req->rq_reqmsg,
3275                                        ptlrpc_at_get_net_latency(req));
3276         DEBUG_REQ(D_HA, req, "REPLAY");
3277
3278         atomic_inc(&req->rq_import->imp_replay_inflight);
3279         spin_lock(&req->rq_lock);
3280         req->rq_early_free_repbuf = 0;
3281         spin_unlock(&req->rq_lock);
3282         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3283
3284         ptlrpcd_add_req(req);
3285         RETURN(0);
3286 }
3287
3288 /**
3289  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
3290  */
3291 void ptlrpc_abort_inflight(struct obd_import *imp)
3292 {
3293         struct ptlrpc_request *req;
3294         ENTRY;
3295
3296         /*
3297          * Make sure that no new requests get processed for this import.
3298          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3299          * this flag and then putting requests on sending_list or delayed_list.
3300          */
3301         assert_spin_locked(&imp->imp_lock);
3302
3303         /*
3304          * XXX locking?  Maybe we should remove each request with the list
3305          * locked?  Also, how do we know if the requests on the list are
3306          * being freed at this time?
3307          */
3308         list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
3309                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3310
3311                 spin_lock(&req->rq_lock);
3312                 if (req->rq_import_generation < imp->imp_generation) {
3313                         req->rq_err = 1;
3314                         req->rq_status = -EIO;
3315                         ptlrpc_client_wake_req(req);
3316                 }
3317                 spin_unlock(&req->rq_lock);
3318         }
3319
3320         list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
3321                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3322
3323                 spin_lock(&req->rq_lock);
3324                 if (req->rq_import_generation < imp->imp_generation) {
3325                         req->rq_err = 1;
3326                         req->rq_status = -EIO;
3327                         ptlrpc_client_wake_req(req);
3328                 }
3329                 spin_unlock(&req->rq_lock);
3330         }
3331
3332         /*
3333          * Last chance to free reqs left on the replay list, but we
3334          * will still leak reqs that haven't committed.
3335          */
3336         if (imp->imp_replayable)
3337                 ptlrpc_free_committed(imp);
3338
3339         EXIT;
3340 }
3341
3342 /**
3343  * Abort all uncompleted requests in request set \a set
3344  */
3345 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3346 {
3347         struct ptlrpc_request *req;
3348
3349         LASSERT(set != NULL);
3350
3351         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
3352                 spin_lock(&req->rq_lock);
3353                 if (req->rq_phase != RQ_PHASE_RPC) {
3354                         spin_unlock(&req->rq_lock);
3355                         continue;
3356                 }
3357
3358                 req->rq_err = 1;
3359                 req->rq_status = -EINTR;
3360                 ptlrpc_client_wake_req(req);
3361                 spin_unlock(&req->rq_lock);
3362         }
3363 }
3364
3365 /**
3366  * Initialize the XID for the node.  This is common among all requests on
3367  * this node, and only requires the property that it is monotonically
3368  * increasing.  It does not need to be sequential.  Since this is also used
3369  * as the RDMA match bits, it is important that a single client NOT have
3370  * the same match bits for two different in-flight requests, hence we do
3371  * NOT want to have an XID per target or similar.
3372  *
3373  * To avoid an unlikely collision between match bits after a client reboot
3374  * (which would deliver old data into the wrong RDMA buffer), initialize
3375  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3376  * If the time is clearly incorrect, we instead use a 62-bit random number.
3377  * In the worst case the random number will overflow 1M RPCs per second in
3378  * 9133 years, or permutations thereof.
3379  */
3380 #define YEAR_2004 (1ULL << 30)
3381 void ptlrpc_init_xid(void)
3382 {
3383         time64_t now = ktime_get_real_seconds();
3384         u64 xid;
3385
3386         if (now < YEAR_2004) {
3387                 get_random_bytes(&xid, sizeof(xid));
3388                 xid >>= 2;
3389                 xid |= (1ULL << 61);
3390         } else {
3391                 xid = (u64)now << 20;
3392         }
3393
3394         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3395         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3396                      0);
3397         xid &= PTLRPC_BULK_OPS_MASK;
3398         atomic64_set(&ptlrpc_last_xid, xid);
3399 }
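
/*
 * A back-of-envelope check of the "9133 years" figure above, assuming
 * PTLRPC_BULK_OPS_COUNT == 16 (an assumption here; see the multi-bulk
 * comments below):
 *
 *   headroom  ~ 2^62 XIDs before the 64-bit counter wraps
 *   burn rate = 1e6 RPC/s * 16 XIDs per RPC = 1.6e7 XID/s
 *   lifetime  = 2^62 / 1.6e7 ~ 2.9e11 s ~ 9.1e3 years
 */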
3400
3401 /**
3402  * Increases xid and returns the resulting new value to the caller.
3403  *
3404  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
3405  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
3406  * itself uses the last bulk xid needed, so the server can determine
3407  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3408  * xid must align to a power-of-two value.
3409  *
3410  * This is assumed to be true due to the initial ptlrpc_last_xid
3411  * value also being initialized to a power-of-two value. LU-1431
3412  */
3413 __u64 ptlrpc_next_xid(void)
3414 {
3415         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3416 }
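
/*
 * A worked example of the multi-bulk layout above, again assuming
 * PTLRPC_BULK_OPS_COUNT == 16: ptlrpc_next_xid() hands out XIDs that
 * are multiples of 16, so a BRW needing three bulk transfers uses
 * match bits xid, xid + 1 and xid + 2 and carries rq_mbits = xid + 2.
 * The server can then recover the transfer count with a mask (sketch):
 *
 *         md_count = (rq_mbits & (PTLRPC_BULK_OPS_COUNT - 1)) + 1;
 */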
3417
3418 /**
3419  * If the request has a newly allocated XID (new request or EINPROGRESS
3420  * resend), use this XID as the bulk matchbits; otherwise allocate new
3421  * matchbits for the request, to ensure the previous bulk fails and to avoid
3422  * problems with lost replies and therefore several transfers landing in the
3423  * same buffer from different sending attempts.
3424  * This also avoids a previous reply landing in a different sending attempt.
3425  */
3426 void ptlrpc_set_mbits(struct ptlrpc_request *req)
3427 {
3428         int md_count = req->rq_bulk ? req->rq_bulk->bd_md_count : 1;
3429
3430         /*
3431          * Generate new matchbits for all resend requests, including
3432          * resend replay.
3433          */
3434         if (req->rq_resend) {
3435                 __u64 old_mbits = req->rq_mbits;
3436
3437                 /*
3438                  * First time resend on -EINPROGRESS will generate new xid,
3439                  * so we can actually use the rq_xid as rq_mbits in such case,
3440          * however, it's a bit hard to distinguish such a resend from a
3441          * 'resend for the -EINPROGRESS resend'. To make it simple,
3442                  * we opt to generate mbits for all resend cases.
3443                  */
3444                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3445                                  BULK_MBITS)) {
3446                         req->rq_mbits = ptlrpc_next_xid();
3447                 } else {
3448                         /*
3449                          * Old version transfers rq_xid to peer as
3450                          * matchbits.
3451                          */
3452                         spin_lock(&req->rq_import->imp_lock);
3453                         list_del_init(&req->rq_unreplied_list);
3454                         ptlrpc_assign_next_xid_nolock(req);
3455                         spin_unlock(&req->rq_import->imp_lock);
3456                         req->rq_mbits = req->rq_xid;
3457                 }
3458                 CDEBUG(D_HA, "resend with new mbits old x%llu new x%llu\n",
3459                        old_mbits, req->rq_mbits);
3460         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3461                 /* Request being sent first time, use xid as matchbits. */
3462                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3463                                  BULK_MBITS) || req->rq_mbits == 0)
3464                 {
3465                         req->rq_mbits = req->rq_xid;
3466                 } else {
3467                         req->rq_mbits -= md_count - 1;
3468                 }
3469         } else {
3470                 /*
3471                  * Replay request, xid and matchbits have already been
3472                  * correctly assigned.
3473                  */
3474                 return;
3475         }
3476
3477         /*
3478          * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
3479          * that server can infer the number of bulks that were prepared,
3480          * see LU-1431
3481          */
3482         req->rq_mbits += md_count - 1;
3483
3484         /*
3485          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3486          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3487          *
3488          * It's ok to directly set the rq_xid here, since this xid bump
3489          * won't affect the request position in unreplied list.
3490          */
3491         if (!OCD_HAS_FLAG(&req->rq_import->imp_connect_data, BULK_MBITS))
3492                 req->rq_xid = req->rq_mbits;
3493 }
3494
3495 /**
3496  * Get a glimpse at what next xid value might have been.
3497  * Returns possible next xid.
3498  */
3499 __u64 ptlrpc_sample_next_xid(void)
3500 {
3501         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3502 }
3503 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3504
3505 /**
3506  * Functions for operating ptlrpc workers.
3507  *
3508  * A ptlrpc work is a function which will be running inside ptlrpc context.
3509  * The callback shouldn't sleep otherwise it will block that ptlrpcd thread.
3510  *
3511  * 1. after a work is created, it can be used many times, that is:
3512  *         handler = ptlrpcd_alloc_work();
3513  *         ptlrpcd_queue_work();
3514  *
3515  *    queue it again when necessary:
3516  *         ptlrpcd_queue_work();
3517  *         ptlrpcd_destroy_work();
3518  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3519  *    but the work will only be queued once at any time. Also, as its name
3520  *    implies, it may be delayed before a ptlrpcd thread actually runs it.
3521  */
3522 struct ptlrpc_work_async_args {
3523         int (*cb)(const struct lu_env *, void *);
3524         void *cbdata;
3525 };
3526
3527 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3528 {
3529         /* re-initialize the req */
3530         req->rq_timeout         = obd_timeout;
3531         req->rq_sent            = ktime_get_real_seconds();
3532         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3533         req->rq_phase           = RQ_PHASE_INTERPRET;
3534         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3535         req->rq_xid             = ptlrpc_next_xid();
3536         req->rq_import_generation = req->rq_import->imp_generation;
3537
3538         ptlrpcd_add_req(req);
3539 }
3540
3541 static int work_interpreter(const struct lu_env *env,
3542                             struct ptlrpc_request *req, void *args, int rc)
3543 {
3544         struct ptlrpc_work_async_args *arg = args;
3545
3546         LASSERT(ptlrpcd_check_work(req));
3547         LASSERT(arg->cb != NULL);
3548
3549         rc = arg->cb(env, arg->cbdata);
3550
3551         list_del_init(&req->rq_set_chain);
3552         req->rq_set = NULL;
3553
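        /*
         * A refcount greater than 1 here means ptlrpcd_queue_work() raced
         * in and requested another run while the callback was executing:
         * reset the count to the "queued" value of 2 and requeue, matching
         * the atomic_inc_return() == 2 check in ptlrpcd_queue_work().
         */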
3554         if (atomic_dec_return(&req->rq_refcount) > 1) {
3555                 atomic_set(&req->rq_refcount, 2);
3556                 ptlrpcd_add_work_req(req);
3557         }
3558         return rc;
3559 }
3560
3561 static int worker_format;
3562
3563 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3564 {
3565         return req->rq_pill.rc_fmt == (void *)&worker_format;
3566 }
3567
3568 /**
3569  * Create a work for ptlrpc.
3570  */
3571 void *ptlrpcd_alloc_work(struct obd_import *imp,
3572                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3573 {
3574         struct ptlrpc_request *req = NULL;
3575         struct ptlrpc_work_async_args *args;
3576
3577         ENTRY;
3578         might_sleep();
3579
3580         if (!cb)
3581                 RETURN(ERR_PTR(-EINVAL));
3582
3583         /* copy some code from deprecated fakereq. */
3584         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3585         if (!req) {
3586                 CERROR("ptlrpc: run out of memory!\n");
3587                 RETURN(ERR_PTR(-ENOMEM));
3588         }
3589
3590         ptlrpc_cli_req_init(req);
3591
3592         req->rq_send_state = LUSTRE_IMP_FULL;
3593         req->rq_type = PTL_RPC_MSG_REQUEST;
3594         req->rq_import = class_import_get(imp);
3595         req->rq_interpret_reply = work_interpreter;
3596         /* don't want reply */
3597         req->rq_no_delay = req->rq_no_resend = 1;
3598         req->rq_pill.rc_fmt = (void *)&worker_format;
3599
3600         args = ptlrpc_req_async_args(args, req);
3601         args->cb     = cb;
3602         args->cbdata = cbdata;
3603
3604         RETURN(req);
3605 }
3606 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3607
3608 void ptlrpcd_destroy_work(void *handler)
3609 {
3610         struct ptlrpc_request *req = handler;
3611
3612         if (req)
3613                 ptlrpc_req_finished(req);
3614 }
3615 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3616
3617 int ptlrpcd_queue_work(void *handler)
3618 {
3619         struct ptlrpc_request *req = handler;
3620
3621         /*
3622          * Check if the req is already being queued.
3623          *
3624          * Here comes a trick: ptlrpc lacks a reliable way of checking
3625          * whether a req is being processed, so I have to use the refcount
3626          * of the req for this purpose. This is okay because the caller
3627          * should use this req as opaque data. - Jinshan
3628          */
3629         LASSERT(atomic_read(&req->rq_refcount) > 0);
3630         if (atomic_inc_return(&req->rq_refcount) == 2)
3631                 ptlrpcd_add_work_req(req);
3632         return 0;
3633 }
3634 EXPORT_SYMBOL(ptlrpcd_queue_work);
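
/*
 * A minimal usage sketch of the worker API above, following the
 * lifecycle described before struct ptlrpc_work_async_args. The
 * callback and caller below are hypothetical and error handling is
 * abbreviated; remember the callback must not sleep, since it runs
 * inside a ptlrpcd thread:
 */
static int example_work_cb(const struct lu_env *env, void *data)
{
        CDEBUG(D_INFO, "work fired, data %p\n", data);
        return 0;
}

static int example_use_work(struct obd_import *imp)
{
        void *handler = ptlrpcd_alloc_work(imp, example_work_cb, NULL);

        if (IS_ERR(handler))
                return PTR_ERR(handler);

        ptlrpcd_queue_work(handler);    /* no-op if already queued */
        /* ... the same handler may be queued again later ... */
        ptlrpcd_destroy_work(handler);  /* drops the allocation ref */
        return 0;
}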