LU-10391 ptlrpc: change rq_peer to struct lnet_nid
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 /** Implementation of client-side PortalRPC interfaces */
33
34 #define DEBUG_SUBSYSTEM S_RPC
35
36 #include <linux/delay.h>
37 #include <linux/random.h>
38
39 #include <lnet/lib-lnet.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(desc->bd_vec[i].bv_page);
69 }
70
71 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
72                                        void *frag, int len)
73 {
74         unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
75
76         ENTRY;
77         while (len > 0) {
78                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
79                                      len);
80                 unsigned long vaddr = (unsigned long)frag;
81
82                 ptlrpc_prep_bulk_page_nopin(desc,
83                                             lnet_kvaddr_to_page(vaddr),
84                                             offset, page_len);
85                 offset = 0;
86                 len -= page_len;
87                 frag += page_len;
88         }
89
90         RETURN(desc->bd_nob);
91 }
92
93 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
94         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
95         .release_frags  = ptlrpc_release_bulk_page_pin,
96 };
97 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
98
99 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
100         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
101         .release_frags  = ptlrpc_release_bulk_noop,
102         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
103 };
104 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
105
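/*
 * Illustrative sketch (not part of the original file): attaching a
 * kernel-virtual buffer to a bulk descriptor through the nopin ops table
 * above. The descriptor is assumed to have been created with
 * &ptlrpc_bulk_kiov_nopin_ops (the pin ops table has no add_iov_frag);
 * the helper name and buffer are hypothetical.
 */
static inline int example_add_kernel_buf(struct ptlrpc_bulk_desc *desc,
                                         void *kbuf, int len)
{
        /* add_iov_frag() walks the address range page by page via
         * lnet_kvaddr_to_page() without taking page references */
        return desc->bd_frag_ops->add_iov_frag(desc, kbuf, len);
}
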
106 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
107 static int ptlrpcd_check_work(struct ptlrpc_request *req);
108 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
109
110 /**
111  * Initialize the passed-in client structure \a cl.
112  */
113 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
114                         struct ptlrpc_client *cl)
115 {
116         cl->cli_request_portal = req_portal;
117         cl->cli_reply_portal   = rep_portal;
118         cl->cli_name           = name;
119 }
120 EXPORT_SYMBOL(ptlrpc_init_client);
121
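/*
 * Illustrative sketch (not part of the original file): how an OBD driver
 * would typically fill in its ptlrpc_client at setup time, shown here with
 * an OSC-style request/reply portal pairing. The helper name and the use
 * of obd->obd_ldlm_client are illustrative assumptions.
 */
static inline void example_setup_client(struct obd_device *obd)
{
        ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
                           "example-osc", &obd->obd_ldlm_client);
}
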
122 /**
123  * Return PortalRPC connection for remote uuid \a uuid
124  */
125 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
126                                                     u32 refnet)
127 {
128         struct ptlrpc_connection *c;
129         struct lnet_nid self;
130         struct lnet_processid peer;
131         int err;
132
133         /*
134          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
135          * before accessing its values.
136          */
137         /* coverity[uninit_use_in_call] */
138         err = ptlrpc_uuid_to_peer(uuid, &peer, &self, refnet);
139         if (err != 0) {
140                 CNETERR("cannot find peer %s!\n", uuid->uuid);
141                 return NULL;
142         }
143
144         c = ptlrpc_connection_get(&peer, &self, uuid);
145         if (c) {
146                 memcpy(c->c_remote_uuid.uuid,
147                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
148         }
149
150         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
151
152         return c;
153 }
154
155 /**
156  * Allocate and initialize new bulk descriptor on the sender.
157  * Returns pointer to the descriptor or NULL on error.
158  */
159 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
160                                          unsigned int max_brw,
161                                          enum ptlrpc_bulk_op_type type,
162                                          unsigned int portal,
163                                          const struct ptlrpc_bulk_frag_ops *ops)
164 {
165         struct ptlrpc_bulk_desc *desc;
166         int i;
167
168         LASSERT(ops->add_kiov_frag != NULL);
169
170         if (max_brw > PTLRPC_BULK_OPS_COUNT)
171                 RETURN(NULL);
172
173         if (nfrags > LNET_MAX_IOV * max_brw)
174                 RETURN(NULL);
175
176         OBD_ALLOC_PTR(desc);
177         if (!desc)
178                 return NULL;
179
180         OBD_ALLOC_LARGE(desc->bd_vec,
181                         nfrags * sizeof(*desc->bd_vec));
182         if (!desc->bd_vec)
183                 goto out;
184
185         spin_lock_init(&desc->bd_lock);
186         init_waitqueue_head(&desc->bd_waitq);
187         desc->bd_max_iov = nfrags;
188         desc->bd_iov_count = 0;
189         desc->bd_portal = portal;
190         desc->bd_type = type;
191         desc->bd_md_count = 0;
192         desc->bd_nob_last = LNET_MTU;
193         desc->bd_frag_ops = ops;
194         LASSERT(max_brw > 0);
195         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
196         /*
197          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
198          * node. Negotiated ocd_brw_size will always be <= this number.
199          */
200         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
201                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
202
203         return desc;
204 out:
205         OBD_FREE_PTR(desc);
206         return NULL;
207 }
208
209 /**
210  * Prepare bulk descriptor for specified outgoing request \a req that
211  * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
212  * the bulk is to be sent. Used on the client side.
213  * Returns pointer to newly allocated initialized bulk descriptor or NULL on
214  * error.
215  */
216 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
217                                               unsigned int nfrags,
218                                               unsigned int max_brw,
219                                               unsigned int type,
220                                               unsigned int portal,
221                                               const struct ptlrpc_bulk_frag_ops
222                                                 *ops)
223 {
224         struct obd_import *imp = req->rq_import;
225         struct ptlrpc_bulk_desc *desc;
226
227         ENTRY;
228         LASSERT(ptlrpc_is_bulk_op_passive(type));
229
230         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
231         if (!desc)
232                 RETURN(NULL);
233
234         desc->bd_import = class_import_get(imp);
235         desc->bd_req = req;
236
237         desc->bd_cbid.cbid_fn  = client_bulk_callback;
238         desc->bd_cbid.cbid_arg = desc;
239
240         /* This makes req own desc; desc is freed when req itself is freed */
241         req->rq_bulk = desc;
242
243         return desc;
244 }
245 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
246
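/*
 * Illustrative sketch (not part of the original file): preparing a passive
 * bulk descriptor for a read-style request and attaching pages through the
 * pinning fragment ops. The helper name and page array are hypothetical and
 * error handling is reduced to the minimum.
 */
static inline int example_prep_read_bulk(struct ptlrpc_request *req,
                                         struct page **pages,
                                         unsigned int count)
{
        struct ptlrpc_bulk_desc *desc;
        unsigned int i;

        /* the client is the passive side: the server PUTs data into it */
        desc = ptlrpc_prep_bulk_imp(req, count, 1, PTLRPC_BULK_PUT_SINK,
                                    OST_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_pin_ops);
        if (!desc)
                return -ENOMEM;

        for (i = 0; i < count; i++)
                desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
                                                 PAGE_SIZE);

        /* req now owns desc and frees it together with itself */
        return 0;
}
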
247 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
248                              struct page *page, int pageoffset, int len,
249                              int pin)
250 {
251         struct bio_vec *kiov;
252
253         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
254         LASSERT(page != NULL);
255         LASSERT(pageoffset >= 0);
256         LASSERT(len > 0);
257         LASSERT(pageoffset + len <= PAGE_SIZE);
258
259         kiov = &desc->bd_vec[desc->bd_iov_count];
260
261         if (((desc->bd_iov_count % LNET_MAX_IOV) == 0) ||
262              ((desc->bd_nob_last + len) > LNET_MTU)) {
263                 desc->bd_mds_off[desc->bd_md_count] = desc->bd_iov_count;
264                 desc->bd_md_count++;
265                 desc->bd_nob_last = 0;
266                 LASSERT(desc->bd_md_count <= PTLRPC_BULK_OPS_COUNT);
267         }
268
269         desc->bd_nob_last += len;
270         desc->bd_nob += len;
271
272         if (pin)
273                 get_page(page);
274
275         kiov->bv_page = page;
276         kiov->bv_offset = pageoffset;
277         kiov->bv_len = len;
278
279         desc->bd_iov_count++;
280 }
281 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
282
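/*
 * Worked example for the MD-splitting logic above (illustrative, assuming
 * 4 KiB pages): a new bulk MD is started whenever the fragment count has
 * reached a multiple of LNET_MAX_IOV or adding the fragment would push the
 * current MD past LNET_MTU bytes. With 4 KiB pages both limits coincide at
 * 1 MiB / 4 KiB = 256 pages per MD, and a descriptor may carry at most
 * PTLRPC_BULK_OPS_COUNT such MDs.
 */
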
283 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
284 {
285         ENTRY;
286
287         LASSERT(desc != NULL);
288         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
289         LASSERT(desc->bd_refs == 0);         /* network hands off */
290         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
291         LASSERT(desc->bd_frag_ops != NULL);
292
293         sptlrpc_enc_pool_put_pages(desc);
294
295         if (desc->bd_export)
296                 class_export_put(desc->bd_export);
297         else
298                 class_import_put(desc->bd_import);
299
300         if (desc->bd_frag_ops->release_frags != NULL)
301                 desc->bd_frag_ops->release_frags(desc);
302
303         OBD_FREE_LARGE(desc->bd_vec,
304                        desc->bd_max_iov * sizeof(*desc->bd_vec));
305         OBD_FREE_PTR(desc);
306         EXIT;
307 }
308 EXPORT_SYMBOL(ptlrpc_free_bulk);
309
310 /**
311  * Set server timelimit for this req, i.e. how long we are willing to wait
312  * for reply before timing out this request.
313  */
314 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
315 {
316         LASSERT(req->rq_import);
317
318         if (AT_OFF) {
319                 /* non-AT settings */
320                 /**
321                  * \a imp_server_timeout means this is a reverse import and
322                  * we send (currently only) ASTs to the client and cannot afford
323                  * to wait too long for the reply, otherwise the other client
324                  * (because of which we are sending this request) would
325                  * time out waiting for us
326                  */
327                 req->rq_timeout = req->rq_import->imp_server_timeout ?
328                                   obd_timeout / 2 : obd_timeout;
329         } else {
330                 struct imp_at *at = &req->rq_import->imp_at;
331                 timeout_t serv_est;
332                 int idx;
333
334                 idx = import_at_get_index(req->rq_import,
335                                           req->rq_request_portal);
336                 serv_est = at_get(&at->iat_service_estimate[idx]);
337                 /*
338                  * Currently a 32 bit value is sent over the
339                  * wire for rq_timeout so please don't change this
340                  * to time64_t. The work for LU-1158 will in time
341                  * replace rq_timeout with a 64 bit nanosecond value
342                  */
343                 req->rq_timeout = at_est2timeout(serv_est);
344         }
345         /*
346          * We could get even fancier here, using history to predict increased
347          * loading...
348          *
349          * Let the server know what this RPC timeout is by putting it in the
350          * reqmsg
351          */
352         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
353 }
354 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
355
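/*
 * Worked example for the adaptive branch above (illustrative): with the
 * "AT service time x 125% + 5s" rule that at_est2timeout() is described as
 * using later in this file, a service estimate of 20s gives
 * rq_timeout = 20 + 20/4 + 5 = 30s, which is then advertised to the server
 * through lustre_msg_set_timeout().
 */
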
356 /* Adjust max service estimate based on server value */
357 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
358                                   timeout_t serv_est)
359 {
360         int idx;
361         timeout_t oldse;
362         struct imp_at *at;
363
364         LASSERT(req->rq_import);
365         at = &req->rq_import->imp_at;
366
367         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
368         /*
369          * max service estimates are tracked on the server side,
370          * so just keep minimal history here
371          */
372         oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
373         if (oldse != 0)
374                 CDEBUG(D_ADAPTTO,
375                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
376                        req->rq_import->imp_obd->obd_name,
377                        req->rq_request_portal,
378                        oldse, at_get(&at->iat_service_estimate[idx]));
379 }
380
381 /* Expected network latency per remote node (secs) */
382 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
383 {
384         return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
385 }
386
387 /* Adjust expected network latency */
388 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
389                                timeout_t service_timeout)
390 {
391         time64_t now = ktime_get_real_seconds();
392         struct imp_at *at;
393         timeout_t oldnl;
394         timeout_t nl;
395
396         LASSERT(req->rq_import);
397
398         if (service_timeout > now - req->rq_sent + 3) {
399                 /*
400                  * b=16408; however, this can also happen if an early reply
401                  * is lost and the client RPC expires and is resent: the early
402                  * reply or the reply to the original RPC can still fit in the
403                  * reply buffer of the resent RPC. The client then measures
404                  * time from the resend time, but the server sent back the
405                  * service time of the original RPC.
406                  */
407                 CDEBUG_LIMIT((lustre_msg_get_flags(req->rq_reqmsg) &
408                               MSG_RESENT) ?  D_ADAPTTO : D_WARNING,
409                              "Reported service time %u > total measured time %lld\n",
410                              service_timeout, now - req->rq_sent);
411                 return;
412         }
413
414         /* Network latency is total time less server processing time,
415          * plus one second to allow for service time (st) rounding
416          */
417         nl = max_t(timeout_t, now - req->rq_sent - service_timeout, 0) + 1;
418         at = &req->rq_import->imp_at;
419
420         oldnl = at_measured(&at->iat_net_latency, nl);
421         if (oldnl != 0)
422                 CDEBUG(D_ADAPTTO,
423                        "The network latency for %s (nid %s) has changed from %d to %d\n",
424                        req->rq_import->imp_obd->obd_name,
425                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
426                        oldnl, at_get(&at->iat_net_latency));
427 }
428
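/*
 * Worked example for ptlrpc_at_adj_net_latency() above (illustrative):
 * if the request was sent at t = 100s, the reply is processed at
 * now = 107s and the server reported service_timeout = 5s, then
 * nl = max(107 - 100 - 5, 0) + 1 = 3s is fed into the per-import
 * network latency estimate.
 */
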
429 static int unpack_reply(struct ptlrpc_request *req)
430 {
431         int rc;
432
433         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
434                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
435                 if (rc) {
436                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
437                                   rc);
438                         return -EPROTO;
439                 }
440         }
441
442         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
443         if (rc) {
444                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
445                           rc);
446                 return -EPROTO;
447         }
448         return 0;
449 }
450
451 /**
452  * Handle an early reply message, called with the rq_lock held.
453  * If anything goes wrong just ignore it - same as if it never happened
454  */
455 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
456 __must_hold(&req->rq_lock)
457 {
458         struct ptlrpc_request *early_req;
459         timeout_t service_timeout;
460         time64_t olddl;
461         int rc;
462
463         ENTRY;
464         req->rq_early = 0;
465         spin_unlock(&req->rq_lock);
466
467         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
468         if (rc) {
469                 spin_lock(&req->rq_lock);
470                 RETURN(rc);
471         }
472
473         rc = unpack_reply(early_req);
474         if (rc != 0) {
475                 sptlrpc_cli_finish_early_reply(early_req);
476                 spin_lock(&req->rq_lock);
477                 RETURN(rc);
478         }
479
480         /*
481          * Use the new timeout value only to adjust the local value for this
482          * request; don't include it in at_history. It is not yet clear why
483          * the service time increased and whether it should be counted or
484          * skipped (e.g. it could be a recovery case or a server error); the
485          * real reply will add all new data if it is worth adding.
486          */
487         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
488         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
489
490         /* Network latency can be adjusted, it is pure network delays */
491         service_timeout = lustre_msg_get_service_timeout(early_req->rq_repmsg);
492         ptlrpc_at_adj_net_latency(req, service_timeout);
493
494         sptlrpc_cli_finish_early_reply(early_req);
495
496         spin_lock(&req->rq_lock);
497         olddl = req->rq_deadline;
498         /*
499          * The server assumes it now has rq_timeout from when the request
500          * arrived, so the client should give it at least that long.
501          * Since we don't know the arrival time we'll use the original
502          * sent time.
503          */
504         req->rq_deadline = req->rq_sent + req->rq_timeout +
505                            ptlrpc_at_get_net_latency(req);
506
507         /* The below message is checked in replay-single.sh test_65{a,b} */
508         /* The below message is checked in sanity-{gss,krb5} test_8 */
509         DEBUG_REQ(D_ADAPTTO, req,
510                   "Early reply #%d, new deadline in %llds (%llds)",
511                   req->rq_early_count,
512                   req->rq_deadline - ktime_get_real_seconds(),
513                   req->rq_deadline - olddl);
514
515         RETURN(rc);
516 }
517
518 static struct kmem_cache *request_cache;
519
520 int ptlrpc_request_cache_init(void)
521 {
522         request_cache = kmem_cache_create("ptlrpc_cache",
523                                           sizeof(struct ptlrpc_request),
524                                           0, SLAB_HWCACHE_ALIGN, NULL);
525         return request_cache ? 0 : -ENOMEM;
526 }
527
528 void ptlrpc_request_cache_fini(void)
529 {
530         kmem_cache_destroy(request_cache);
531 }
532
533 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
534 {
535         struct ptlrpc_request *req;
536
537         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
538         return req;
539 }
540
541 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
542 {
543         OBD_SLAB_FREE_PTR(req, request_cache);
544 }
545
546 /**
547  * Wind down request pool \a pool.
548  * Frees all requests from the pool too
549  */
550 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
551 {
552         struct ptlrpc_request *req;
553
554         LASSERT(pool != NULL);
555
556         spin_lock(&pool->prp_lock);
557         while ((req = list_first_entry_or_null(&pool->prp_req_list,
558                                                struct ptlrpc_request,
559                                                rq_list))) {
560                 list_del(&req->rq_list);
561                 LASSERT(req->rq_reqbuf);
562                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
563                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
564                 ptlrpc_request_cache_free(req);
565         }
566         spin_unlock(&pool->prp_lock);
567         OBD_FREE(pool, sizeof(*pool));
568 }
569 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
570
571 /**
572  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
573  */
574 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
575 {
576         int i;
577         int size = 1;
578
579         while (size < pool->prp_rq_size)
580                 size <<= 1;
581
582         LASSERTF(list_empty(&pool->prp_req_list) ||
583                  size == pool->prp_rq_size,
584                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
585                  pool->prp_rq_size, size);
586
587         pool->prp_rq_size = size;
588         for (i = 0; i < num_rq; i++) {
589                 struct ptlrpc_request *req;
590                 struct lustre_msg *msg;
591
592                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
593                 if (!req)
594                         return i;
595                 OBD_ALLOC_LARGE(msg, size);
596                 if (!msg) {
597                         ptlrpc_request_cache_free(req);
598                         return i;
599                 }
600                 req->rq_reqbuf = msg;
601                 req->rq_reqbuf_len = size;
602                 req->rq_pool = pool;
603                 spin_lock(&pool->prp_lock);
604                 list_add_tail(&req->rq_list, &pool->prp_req_list);
605                 spin_unlock(&pool->prp_lock);
606         }
607         return num_rq;
608 }
609 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
610
611 /**
612  * Create and initialize new request pool with given attributes:
613  * \a num_rq - initial number of requests to create for the pool
614  * \a msgsize - maximum message size possible for requests in this pool
615  * \a populate_pool - function to be called when more requests need to be added
616  *                    to the pool
617  * Returns pointer to newly created pool or NULL on error.
618  */
619 struct ptlrpc_request_pool *
620 ptlrpc_init_rq_pool(int num_rq, int msgsize,
621                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
622 {
623         struct ptlrpc_request_pool *pool;
624
625         OBD_ALLOC_PTR(pool);
626         if (!pool)
627                 return NULL;
628
629         /*
630          * Request next power of two for the allocation, because internally
631          * the kernel would do exactly this
632          */
633         spin_lock_init(&pool->prp_lock);
634         INIT_LIST_HEAD(&pool->prp_req_list);
635         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
636         pool->prp_populate = populate_pool;
637
638         populate_pool(pool, num_rq);
639
640         return pool;
641 }
642 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
643
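/*
 * Illustrative sketch (not part of the original file): creating a small
 * emergency request pool with the populate callback defined above and
 * tearing it down again. The helper names, pool size and message size are
 * hypothetical.
 */
static inline struct ptlrpc_request_pool *example_make_pool(void)
{
        /* 4 pre-allocated requests with 16 KiB request buffers */
        return ptlrpc_init_rq_pool(4, 16384, ptlrpc_add_rqs_to_pool);
}

static inline void example_destroy_pool(struct ptlrpc_request_pool *pool)
{
        if (pool)
                ptlrpc_free_rq_pool(pool);
}
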
644 /**
645  * Fetches one request from pool \a pool
646  */
647 static struct ptlrpc_request *
648 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
649 {
650         struct ptlrpc_request *request;
651         struct lustre_msg *reqbuf;
652
653         if (!pool)
654                 return NULL;
655
656         spin_lock(&pool->prp_lock);
657
658         /*
659          * See if we have anything in the pool, and bail out if not.
660          * In the writeout path, where this matters, this is safe to do
661          * because nothing is lost in this case; when some in-flight
662          * requests complete, this code will be called again.
663          */
664         if (unlikely(list_empty(&pool->prp_req_list))) {
665                 spin_unlock(&pool->prp_lock);
666                 return NULL;
667         }
668
669         request = list_first_entry(&pool->prp_req_list, struct ptlrpc_request,
670                                    rq_list);
671         list_del_init(&request->rq_list);
672         spin_unlock(&pool->prp_lock);
673
674         LASSERT(request->rq_reqbuf);
675         LASSERT(request->rq_pool);
676
677         reqbuf = request->rq_reqbuf;
678         memset(request, 0, sizeof(*request));
679         request->rq_reqbuf = reqbuf;
680         request->rq_reqbuf_len = pool->prp_rq_size;
681         request->rq_pool = pool;
682
683         return request;
684 }
685
686 /**
687  * Return the freed \a request to its pool.
688  */
689 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
690 {
691         struct ptlrpc_request_pool *pool = request->rq_pool;
692
693         spin_lock(&pool->prp_lock);
694         LASSERT(list_empty(&request->rq_list));
695         LASSERT(!request->rq_receiving_reply);
696         list_add_tail(&request->rq_list, &pool->prp_req_list);
697         spin_unlock(&pool->prp_lock);
698 }
699
700 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
701 {
702         struct obd_import *imp = req->rq_import;
703         struct ptlrpc_request *iter;
704
705         assert_spin_locked(&imp->imp_lock);
706         LASSERT(list_empty(&req->rq_unreplied_list));
707
708         /* unreplied list is sorted by xid in ascending order */
709         list_for_each_entry_reverse(iter, &imp->imp_unreplied_list,
710                                     rq_unreplied_list) {
711                 LASSERT(req->rq_xid != iter->rq_xid);
712                 if (req->rq_xid < iter->rq_xid)
713                         continue;
714                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
715                 return;
716         }
717         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
718 }
719
720 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
721 {
722         req->rq_xid = ptlrpc_next_xid();
723         ptlrpc_add_unreplied(req);
724 }
725
726 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
727 {
728         spin_lock(&req->rq_import->imp_lock);
729         ptlrpc_assign_next_xid_nolock(req);
730         spin_unlock(&req->rq_import->imp_lock);
731 }
732
733 static atomic64_t ptlrpc_last_xid;
734
735 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
736 {
737         spin_lock(&req->rq_import->imp_lock);
738         list_del_init(&req->rq_unreplied_list);
739         ptlrpc_assign_next_xid_nolock(req);
740         spin_unlock(&req->rq_import->imp_lock);
741         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
742 }
743
744 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
745 {
746         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
747         __u32 opc;
748         __u16 tag;
749
750         opc = lustre_msg_get_opc(req->rq_reqmsg);
751         tag = obd_get_mod_rpc_slot(cli, opc);
752         lustre_msg_set_tag(req->rq_reqmsg, tag);
753         ptlrpc_reassign_next_xid(req);
754 }
755 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
756
757 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
758 {
759         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
760
761         if (tag != 0) {
762                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
763                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
764
765                 obd_put_mod_rpc_slot(cli, opc, tag);
766         }
767 }
768 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
769
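/*
 * Illustrative sketch (not part of the original file): a modifying RPC
 * takes a slot (which also tags the request and reassigns its XID) before
 * being sent synchronously, and releases it afterwards. The helper name is
 * hypothetical and the ptlrpc_queue_wait() signature is assumed from the
 * wider ptlrpc API.
 */
static inline int example_send_modifying_rpc(struct ptlrpc_request *req)
{
        int rc;

        ptlrpc_get_mod_rpc_slot(req);
        rc = ptlrpc_queue_wait(req);
        ptlrpc_put_mod_rpc_slot(req);

        return rc;
}
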
770 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
771                              __u32 version, int opcode, char **bufs,
772                              struct ptlrpc_cli_ctx *ctx)
773 {
774         int count;
775         struct obd_import *imp;
776         __u32 *lengths;
777         int rc;
778
779         ENTRY;
780
781         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
782         imp = request->rq_import;
783         lengths = request->rq_pill.rc_area[RCL_CLIENT];
784
785         if (ctx) {
786                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
787         } else {
788                 rc = sptlrpc_req_get_ctx(request);
789                 if (rc)
790                         GOTO(out_free, rc);
791         }
792         sptlrpc_req_set_flavor(request, opcode);
793
794         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
795                                  lengths, bufs);
796         if (rc)
797                 GOTO(out_ctx, rc);
798
799         lustre_msg_add_version(request->rq_reqmsg, version);
800         request->rq_send_state = LUSTRE_IMP_FULL;
801         request->rq_type = PTL_RPC_MSG_REQUEST;
802
803         request->rq_req_cbid.cbid_fn  = request_out_callback;
804         request->rq_req_cbid.cbid_arg = request;
805
806         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
807         request->rq_reply_cbid.cbid_arg = request;
808
809         request->rq_reply_deadline = 0;
810         request->rq_bulk_deadline = 0;
811         request->rq_req_deadline = 0;
812         request->rq_phase = RQ_PHASE_NEW;
813         request->rq_next_phase = RQ_PHASE_UNDEFINED;
814
815         request->rq_request_portal = imp->imp_client->cli_request_portal;
816         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
817
818         ptlrpc_at_set_req_timeout(request);
819
820         lustre_msg_set_opc(request->rq_reqmsg, opcode);
821
822         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
823         if (cfs_fail_val == opcode) {
824                 time64_t *fail_t = NULL, *fail2_t = NULL;
825
826                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
827                         fail_t = &request->rq_bulk_deadline;
828                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
829                         fail_t = &request->rq_reply_deadline;
830                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
831                         fail_t = &request->rq_req_deadline;
832                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
833                         fail_t = &request->rq_reply_deadline;
834                         fail2_t = &request->rq_bulk_deadline;
835                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
836                         time64_t now = ktime_get_real_seconds();
837                         u64 xid = ((u64)now >> 4) << 24;
838
839                         atomic64_set(&ptlrpc_last_xid, xid);
840                 }
841
842                 if (fail_t) {
843                         *fail_t = ktime_get_real_seconds() +
844                                   PTLRPC_REQ_LONG_UNLINK;
845
846                         if (fail2_t)
847                                 *fail2_t = ktime_get_real_seconds() +
848                                            PTLRPC_REQ_LONG_UNLINK;
849
850                         /*
851                          * The RPC is infected; let the test change the
852                          * fail_loc
853                          */
854                         msleep(4 * MSEC_PER_SEC);
855                 }
856         }
857         ptlrpc_assign_next_xid(request);
858
859         RETURN(0);
860
861 out_ctx:
862         LASSERT(!request->rq_pool);
863         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
864 out_free:
865         atomic_dec(&imp->imp_reqs);
866         class_import_put(imp);
867
868         return rc;
869 }
870 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
871
872 /**
873  * Pack request buffers for network transfer, performing encryption
874  * steps if necessary.
875  */
876 int ptlrpc_request_pack(struct ptlrpc_request *request,
877                         __u32 version, int opcode)
878 {
879         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
880 }
881 EXPORT_SYMBOL(ptlrpc_request_pack);
882
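/*
 * Illustrative sketch (not part of the original file): the common two-step
 * pattern of allocating a request for a capsule format and then packing it.
 * RQF_OST_STATFS/OST_STATFS/LUSTRE_OST_VERSION are used as a plausible
 * example; further setup (reply length, timeouts) is elided.
 */
static inline struct ptlrpc_request *
example_alloc_statfs(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int rc;

        req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
        if (!req)
                return ERR_PTR(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
        if (rc) {
                ptlrpc_request_free(req);
                return ERR_PTR(rc);
        }
        return req;
}
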
883 /**
884  * Helper function to allocate new request on import \a imp
885  * and possibly using an existing request from pool \a pool if provided.
886  * Returns allocated request structure with import field filled or
887  * NULL on error.
888  */
889 static inline
890 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
891                                               struct ptlrpc_request_pool *pool)
892 {
893         struct ptlrpc_request *request = NULL;
894
895         request = ptlrpc_request_cache_alloc(GFP_NOFS);
896
897         if (!request && pool)
898                 request = ptlrpc_prep_req_from_pool(pool);
899
900         if (request) {
901                 ptlrpc_cli_req_init(request);
902
903                 LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
904                 LASSERT(imp != LP_POISON);
905                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
906                          imp->imp_client);
907                 LASSERT(imp->imp_client != LP_POISON);
908
909                 request->rq_import = class_import_get(imp);
910                 atomic_inc(&imp->imp_reqs);
911         } else {
912                 CERROR("request allocation out of memory\n");
913         }
914
915         return request;
916 }
917
918 static int ptlrpc_reconnect_if_idle(struct obd_import *imp)
919 {
920         int rc;
921
922         /*
923          * initiate connection if needed when the import has been
924          * referenced by the new request to avoid races with disconnect.
925          * serialize this check against conditional state=IDLE
926          * in ptlrpc_disconnect_idle_interpret()
927          */
928         spin_lock(&imp->imp_lock);
929         if (imp->imp_state == LUSTRE_IMP_IDLE) {
930                 imp->imp_generation++;
931                 imp->imp_initiated_at = imp->imp_generation;
932                 imp->imp_state = LUSTRE_IMP_NEW;
933
934                 /* connect_import_locked releases imp_lock */
935                 rc = ptlrpc_connect_import_locked(imp);
936                 if (rc)
937                         return rc;
938                 ptlrpc_pinger_add_import(imp);
939         } else {
940                 spin_unlock(&imp->imp_lock);
941         }
942         return 0;
943 }
944
945 /**
946  * Helper function for creating a request.
947  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
948  * buffer structures according to capsule template \a format.
949  * Returns allocated request structure pointer or NULL on error.
950  */
951 static struct ptlrpc_request *
952 ptlrpc_request_alloc_internal(struct obd_import *imp,
953                               struct ptlrpc_request_pool *pool,
954                               const struct req_format *format)
955 {
956         struct ptlrpc_request *request;
957
958         request = __ptlrpc_request_alloc(imp, pool);
959         if (!request)
960                 return NULL;
961
962         /* don't make expensive check for idling connection
963          * if it's already connected */
964         if (unlikely(imp->imp_state != LUSTRE_IMP_FULL)) {
965                 if (ptlrpc_reconnect_if_idle(imp) < 0) {
966                         atomic_dec(&imp->imp_reqs);
967                         ptlrpc_request_free(request);
968                         return NULL;
969                 }
970         }
971
972         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
973         req_capsule_set(&request->rq_pill, format);
974         return request;
975 }
976
977 /**
978  * Allocate new request structure for import \a imp and initialize its
979  * buffer structure according to capsule template \a format.
980  */
981 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
982                                             const struct req_format *format)
983 {
984         return ptlrpc_request_alloc_internal(imp, NULL, format);
985 }
986 EXPORT_SYMBOL(ptlrpc_request_alloc);
987
988 /**
989  * Allocate new request structure for import \a imp from pool \a pool and
990  * initialize its buffer structure according to capsule template \a format.
991  */
992 struct ptlrpc_request *
993 ptlrpc_request_alloc_pool(struct obd_import *imp,
994                           struct ptlrpc_request_pool *pool,
995                           const struct req_format *format)
996 {
997         return ptlrpc_request_alloc_internal(imp, pool, format);
998 }
999 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1000
1001 /**
1002  * For requests not from pool, free memory of the request structure.
1003  * For requests obtained from a pool earlier, return request back to pool.
1004  */
1005 void ptlrpc_request_free(struct ptlrpc_request *request)
1006 {
1007         if (request->rq_pool)
1008                 __ptlrpc_free_req_to_pool(request);
1009         else
1010                 ptlrpc_request_cache_free(request);
1011 }
1012 EXPORT_SYMBOL(ptlrpc_request_free);
1013
1014 /**
1015  * Allocate a new request for operation \a opcode and immediately pack it for
1016  * network transfer.
1017  * Only used for simple requests like OBD_PING where the only important
1018  * part of the request is operation itself.
1019  * Returns allocated request or NULL on error.
1020  */
1021 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1022                                                  const struct req_format *format,
1023                                                  __u32 version, int opcode)
1024 {
1025         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1026         int rc;
1027
1028         if (req) {
1029                 rc = ptlrpc_request_pack(req, version, opcode);
1030                 if (rc) {
1031                         ptlrpc_request_free(req);
1032                         req = NULL;
1033                 }
1034         }
1035         return req;
1036 }
1037 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
1038
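/*
 * Illustrative sketch (not part of the original file): the one-step helper
 * above suits simple RPCs such as OBD_PING; handing the request to ptlrpcd
 * is shown as one plausible way to send it asynchronously. The helper name
 * is hypothetical.
 */
static inline int example_send_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (!req)
                return -ENOMEM;

        ptlrpc_request_set_replen(req);
        ptlrpcd_add_req(req);

        return 0;
}
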
1039 /**
1040  * Allocate and initialize new request set structure on the current CPT.
1041  * Returns a pointer to the newly allocated set structure or NULL on error.
1042  */
1043 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1044 {
1045         struct ptlrpc_request_set *set;
1046         int cpt;
1047
1048         ENTRY;
1049         cpt = cfs_cpt_current(cfs_cpt_tab, 0);
1050         OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
1051         if (!set)
1052                 RETURN(NULL);
1053         atomic_set(&set->set_refcount, 1);
1054         INIT_LIST_HEAD(&set->set_requests);
1055         init_waitqueue_head(&set->set_waitq);
1056         atomic_set(&set->set_new_count, 0);
1057         atomic_set(&set->set_remaining, 0);
1058         spin_lock_init(&set->set_new_req_lock);
1059         INIT_LIST_HEAD(&set->set_new_requests);
1060         set->set_max_inflight = UINT_MAX;
1061         set->set_producer     = NULL;
1062         set->set_producer_arg = NULL;
1063         set->set_rc           = 0;
1064
1065         RETURN(set);
1066 }
1067 EXPORT_SYMBOL(ptlrpc_prep_set);
1068
1069 /**
1070  * Allocate and initialize new request set structure with flow control
1071  * extension. This extension allows controlling the number of in-flight
1072  * requests for the whole set. A callback function to generate requests must
1073  * be provided and the request set will keep the number of requests sent over
1074  * the wire at or below @max_inflight.
1075  * Returns a pointer to the newly allocated set structure or NULL on error.
1076  */
1077 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1078                                              void *arg)
1079
1080 {
1081         struct ptlrpc_request_set *set;
1082
1083         set = ptlrpc_prep_set();
1084         if (!set)
1085                 RETURN(NULL);
1086
1087         set->set_max_inflight  = max;
1088         set->set_producer      = func;
1089         set->set_producer_arg  = arg;
1090
1091         RETURN(set);
1092 }
1093
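/*
 * Illustrative sketch (not part of the original file): a producer callback
 * as consumed by ptlrpc_prep_fcset() above. The set_producer_func signature
 * (set, opaque argument) is assumed from the ptlrpc headers, and the
 * OBD_PING request is just a stand-in for whatever the caller produces.
 */
static int example_producer(struct ptlrpc_request_set *set, void *arg)
{
        struct obd_import *imp = arg;
        struct ptlrpc_request *req;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (!req)
                return -ENOMEM;

        ptlrpc_request_set_replen(req);
        /* with a producer set, ptlrpc_set_add_req() sends it right away */
        ptlrpc_set_add_req(set, req);

        return 0;
}
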
1094 /**
1095  * Wind down and free request set structure previously allocated with
1096  * ptlrpc_prep_set.
1097  * Ensures that all requests on the set have completed and removes
1098  * all requests from the request list in a set.
1099  * If any unsent requests happen to be on the list, pretends that they got
1100  * an error in flight and calls their completion handler.
1101  */
1102 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1103 {
1104         struct ptlrpc_request *req;
1105         int expected_phase;
1106         int n = 0;
1107
1108         ENTRY;
1109
1110         /* Requests on the set should either all be completed, or all be new */
1111         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1112                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1113         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
1114                 LASSERT(req->rq_phase == expected_phase);
1115                 n++;
1116         }
1117
1118         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1119                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1120                  atomic_read(&set->set_remaining), n);
1121
1122         while ((req = list_first_entry_or_null(&set->set_requests,
1123                                                struct ptlrpc_request,
1124                                                rq_set_chain))) {
1125                 list_del_init(&req->rq_set_chain);
1126
1127                 LASSERT(req->rq_phase == expected_phase);
1128
1129                 if (req->rq_phase == RQ_PHASE_NEW) {
1130                         ptlrpc_req_interpret(NULL, req, -EBADR);
1131                         atomic_dec(&set->set_remaining);
1132                 }
1133
1134                 spin_lock(&req->rq_lock);
1135                 req->rq_set = NULL;
1136                 req->rq_invalid_rqset = 0;
1137                 spin_unlock(&req->rq_lock);
1138
1139                 ptlrpc_req_finished(req);
1140         }
1141
1142         LASSERT(atomic_read(&set->set_remaining) == 0);
1143
1144         ptlrpc_reqset_put(set);
1145         EXIT;
1146 }
1147 EXPORT_SYMBOL(ptlrpc_set_destroy);
1148
1149 /**
1150  * Add a new request to the general purpose request set.
1151  * Assumes request reference from the caller.
1152  */
1153 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1154                         struct ptlrpc_request *req)
1155 {
1156         if (set == PTLRPCD_SET) {
1157                 ptlrpcd_add_req(req);
1158                 return;
1159         }
1160
1161         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1162         LASSERT(list_empty(&req->rq_set_chain));
1163
1164         if (req->rq_allow_intr)
1165                 set->set_allow_intr = 1;
1166
1167         /* The set takes over the caller's request reference */
1168         list_add_tail(&req->rq_set_chain, &set->set_requests);
1169         req->rq_set = set;
1170         atomic_inc(&set->set_remaining);
1171         req->rq_queued_time = ktime_get_seconds();
1172
1173         if (req->rq_reqmsg)
1174                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1175
1176         if (set->set_producer)
1177                 /*
1178                  * If the request set has a producer callback, the RPC must be
1179                  * sent straight away
1180                  */
1181                 ptlrpc_send_new_req(req);
1182 }
1183 EXPORT_SYMBOL(ptlrpc_set_add_req);
1184
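/*
 * Illustrative sketch (not part of the original file): the usual life cycle
 * of a general-purpose request set. The helper name is hypothetical and the
 * ptlrpc_set_wait() signature (taking a lu_env) is assumed from the wider
 * ptlrpc API; the set consumes the caller's request reference.
 */
static inline int example_send_via_set(const struct lu_env *env,
                                       struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set;
        int rc;

        set = ptlrpc_prep_set();
        if (!set)
                return -ENOMEM;

        ptlrpc_set_add_req(set, req);   /* set takes over the reference */
        rc = ptlrpc_set_wait(env, set); /* send and wait for completion */
        ptlrpc_set_destroy(set);        /* drops the remaining references */

        return rc;
}
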
1185 /**
1186  * Add a request to a request set with a dedicated server thread
1187  * and wake the thread to do any necessary processing.
1188  * Currently only used for ptlrpcd.
1189  */
1190 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1191                             struct ptlrpc_request *req)
1192 {
1193         struct ptlrpc_request_set *set = pc->pc_set;
1194         int count, i;
1195
1196         LASSERT(req->rq_set == NULL);
1197         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1198
1199         spin_lock(&set->set_new_req_lock);
1200         /*
1201          * The set takes over the caller's request reference.
1202          */
1203         req->rq_set = set;
1204         req->rq_queued_time = ktime_get_seconds();
1205         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1206         count = atomic_inc_return(&set->set_new_count);
1207         spin_unlock(&set->set_new_req_lock);
1208
1209         /* Only need to call wakeup once for the first entry. */
1210         if (count == 1) {
1211                 wake_up(&set->set_waitq);
1212
1213                 /*
1214                  * XXX: It may be unnecessary to wake up all the partners. But to
1215                  *      guarantee the async RPC can be processed ASAP, we have
1216                  *      no better choice. It may be fixed in the future.
1217                  */
1218                 for (i = 0; i < pc->pc_npartners; i++)
1219                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1220         }
1221 }
1222
1223 /**
1224  * Based on the current state of the import, determine if the request
1225  * can be sent, is an error, or should be delayed.
1226  *
1227  * Returns true if this request should be delayed. If false, and
1228  * *status is set, then the request can not be sent and *status is the
1229  * error code.  If false and status is 0, then request can be sent.
1230  *
1231  * The imp->imp_lock must be held.
1232  */
1233 static int ptlrpc_import_delay_req(struct obd_import *imp,
1234                                    struct ptlrpc_request *req, int *status)
1235 {
1236         int delay = 0;
1237
1238         ENTRY;
1239         LASSERT(status);
1240         *status = 0;
1241
1242         if (req->rq_ctx_init || req->rq_ctx_fini) {
1243                 /* always allow ctx init/fini rpc go through */
1244         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1245                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1246                 *status = -EIO;
1247         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1248                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1249
1250                 /*
1251                  * pings or MDS-equivalent STATFS may safely
1252                  * race with umount
1253                  */
1254                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1255                           D_HA : D_ERROR, req, "IMP_CLOSED");
1256                 *status = -EIO;
1257         } else if (ptlrpc_send_limit_expired(req)) {
1258                 /* probably doesn't need to be a D_ERROR after initial testing */
1259                 DEBUG_REQ(D_HA, req, "send limit expired");
1260                 *status = -ETIMEDOUT;
1261         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1262                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1263                 ;/* allow CONNECT even if import is invalid */
1264                 if (atomic_read(&imp->imp_inval_count) != 0) {
1265                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1266                         *status = -EIO;
1267                 }
1268         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1269                 if (!imp->imp_deactive)
1270                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1271                 *status = -ESHUTDOWN; /* b=12940 */
1272         } else if (req->rq_import_generation != imp->imp_generation) {
1273                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
1274                 *status = -EIO;
1275         } else if (req->rq_send_state != imp->imp_state) {
1276                 /* invalidate in progress - any requests should be dropped */
1277                 if (atomic_read(&imp->imp_inval_count) != 0) {
1278                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1279                         *status = -EIO;
1280                 } else if (req->rq_no_delay &&
1281                            imp->imp_generation != imp->imp_initiated_at) {
1282                         /* ignore nodelay for requests initiating connections */
1283                         *status = -EAGAIN;
1284                 } else if (req->rq_allow_replay &&
1285                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1286                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1287                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1288                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1289                         DEBUG_REQ(D_HA, req, "allow during recovery");
1290                 } else {
1291                         delay = 1;
1292                 }
1293         }
1294
1295         RETURN(delay);
1296 }
1297
1298 /**
1299  * Decide if the error message should be printed to the console or not.
1300  * Makes its decision based on request type, status, and failure frequency.
1301  *
1302  * \param[in] req  request that failed and may need a console message
1303  *
1304  * \retval false if no message should be printed
1305  * \retval true  if console message should be printed
1306  */
1307 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1308 {
1309         LASSERT(req->rq_reqmsg != NULL);
1310
1311         /* Suppress particular reconnect errors which are to be expected. */
1312         if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
1313                 /* Suppress timed out reconnect requests */
1314                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1315                     req->rq_timedout)
1316                         return false;
1317
1318                 /*
1319                  * Suppress most unavailable/again reconnect requests, but
1320                  * print occasionally so it is clear client is trying to
1321                  * connect to a server where no target is running.
1322                  */
1323                 if ((err == -ENODEV || err == -EAGAIN) &&
1324                     req->rq_import->imp_conn_cnt % 30 != 20)
1325                         return false;
1326         }
1327
1328         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1329                 /* -EAGAIN is normal when using POSIX flocks */
1330                 return false;
1331
1332         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1333             (req->rq_xid & 0xf) != 10)
1334                 /* Suppress most ping requests, they may fail occasionally */
1335                 return false;
1336
1337         return true;
1338 }
1339
1340 /**
1341  * Check request processing status.
1342  * Returns the status.
1343  */
1344 static int ptlrpc_check_status(struct ptlrpc_request *req)
1345 {
1346         int rc;
1347
1348         ENTRY;
1349         rc = lustre_msg_get_status(req->rq_repmsg);
1350         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1351                 struct obd_import *imp = req->rq_import;
1352                 struct lnet_nid *nid = &imp->imp_connection->c_peer.nid;
1353                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1354
1355                 if (ptlrpc_console_allow(req, opc, rc))
1356                         LCONSOLE_ERROR_MSG(0x11,
1357                                            "%s: operation %s to node %s failed: rc = %d\n",
1358                                            imp->imp_obd->obd_name,
1359                                            ll_opcode2str(opc),
1360                                            libcfs_nidstr(nid), rc);
1361                 RETURN(rc < 0 ? rc : -EINVAL);
1362         }
1363
1364         if (rc)
1365                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1366
1367         RETURN(rc);
1368 }
1369
1370 /**
1371  * Save pre-versions of objects into request for replay.
1372  * Versions are obtained from the server reply.
1373  * Used for VBR (version-based recovery).
1374  */
1375 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1376 {
1377         struct lustre_msg *repmsg = req->rq_repmsg;
1378         struct lustre_msg *reqmsg = req->rq_reqmsg;
1379         __u64 *versions = lustre_msg_get_versions(repmsg);
1380
1381         ENTRY;
1382         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1383                 return;
1384
1385         LASSERT(versions);
1386         lustre_msg_set_versions(reqmsg, versions);
1387         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1388                versions[0], versions[1]);
1389
1390         EXIT;
1391 }
1392
1393 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1394 {
1395         struct ptlrpc_request *req;
1396
1397         assert_spin_locked(&imp->imp_lock);
1398         if (list_empty(&imp->imp_unreplied_list))
1399                 return 0;
1400
1401         req = list_first_entry(&imp->imp_unreplied_list, struct ptlrpc_request,
1402                                rq_unreplied_list);
1403         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1404
1405         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1406                 imp->imp_known_replied_xid = req->rq_xid - 1;
1407
1408         return req->rq_xid - 1;
1409 }
1410
1411 /**
1412  * Callback function called when client receives RPC reply for \a req.
1413  * Returns 0 on success or error code.
1414  * The return value would be assigned to req->rq_status by the caller
1415  * as request processing status.
1416  * This function also decides if the request needs to be saved for later replay.
1417  */
1418 static int after_reply(struct ptlrpc_request *req)
1419 {
1420         struct obd_import *imp = req->rq_import;
1421         struct obd_device *obd = req->rq_import->imp_obd;
1422         ktime_t work_start;
1423         u64 committed;
1424         s64 timediff;
1425         int rc;
1426
1427         ENTRY;
1428         LASSERT(obd != NULL);
1429         /* repbuf must be unlinked */
1430         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1431
1432         if (req->rq_reply_truncated) {
1433                 if (ptlrpc_no_resend(req)) {
1434                         DEBUG_REQ(D_ERROR, req,
1435                                   "reply buffer overflow, expected=%d, actual size=%d",
1436                                   req->rq_nob_received, req->rq_repbuf_len);
1437                         RETURN(-EOVERFLOW);
1438                 }
1439
1440                 sptlrpc_cli_free_repbuf(req);
1441                 /*
1442                  * Pass the required reply buffer size (include
1443                  * space for early reply).
1444                  * NB: no need to round up because alloc_repbuf
1445                  * will round it up
1446                  */
1447                 req->rq_replen = req->rq_nob_received;
1448                 req->rq_nob_received = 0;
1449                 spin_lock(&req->rq_lock);
1450                 req->rq_resend       = 1;
1451                 spin_unlock(&req->rq_lock);
1452                 RETURN(0);
1453         }
1454
1455         work_start = ktime_get_real();
1456         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1457
1458         /*
1459          * NB Until this point, the whole of the incoming message,
1460          * including buflens, status etc is in the sender's byte order.
1461          */
1462         rc = sptlrpc_cli_unwrap_reply(req);
1463         if (rc) {
1464                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1465                 RETURN(rc);
1466         }
1467
1468         /*
1469          * Security layer unwrap might ask to resend this request.
1470          */
1471         if (req->rq_resend)
1472                 RETURN(0);
1473
1474         rc = unpack_reply(req);
1475         if (rc)
1476                 RETURN(rc);
1477
1478         /* retry indefinitely on EINPROGRESS */
1479         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1480             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1481                 time64_t now = ktime_get_real_seconds();
1482
1483                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1484                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1485                 spin_lock(&req->rq_lock);
1486                 req->rq_resend = 1;
1487                 spin_unlock(&req->rq_lock);
1488                 req->rq_nr_resend++;
1489
1490                 /* Readjust the timeout for current conditions */
1491                 ptlrpc_at_set_req_timeout(req);
1492                 /*
1493                  * Delay the resend to give the server a chance to get ready.
1494                  * The delay increases by 1s on every resend and is capped at
1495                  * the current request timeout (i.e. obd_timeout if AT is off,
1496                  * or AT service time x 125% + 5s, see at_est2timeout)
1497                  */
1498                 if (req->rq_nr_resend > req->rq_timeout)
1499                         req->rq_sent = now + req->rq_timeout;
1500                 else
1501                         req->rq_sent = now + req->rq_nr_resend;
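                /*
                 * For example, with rq_timeout = 10s the first resend is
                 * delayed by 1s, the second by 2s, and from the 10th resend
                 * onward the delay stays at the 10s cap.
                 */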
1502
1503                 /* Resend for EINPROGRESS will use a new XID */
1504                 spin_lock(&imp->imp_lock);
1505                 list_del_init(&req->rq_unreplied_list);
1506                 spin_unlock(&imp->imp_lock);
1507
1508                 RETURN(0);
1509         }
1510
1511         if (obd->obd_svc_stats) {
1512                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1513                                     timediff);
1514                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1515         }
1516
1517         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1518             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1519                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1520                           lustre_msg_get_type(req->rq_repmsg));
1521                 RETURN(-EPROTO);
1522         }
1523
1524         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1525                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
1526         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1527         ptlrpc_at_adj_net_latency(req,
1528                                   lustre_msg_get_service_timeout(req->rq_repmsg));
1529
1530         rc = ptlrpc_check_status(req);
1531
1532         if (rc) {
1533                 /*
1534                  * Either we've been evicted, or the server has failed for
1535                  * some reason. Try to reconnect, and if that fails, punt to
1536                  * the upcall.
1537                  */
1538                 if (ptlrpc_recoverable_error(rc)) {
1539                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1540                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1541                                 RETURN(rc);
1542                         }
1543                         ptlrpc_request_handle_notconn(req);
1544                         RETURN(rc);
1545                 }
1546         } else {
1547                 /*
1548                  * Check whether the server sent an SLV. Do it only for RPCs
1549                  * with rc == 0.
1550                  */
1551                 ldlm_cli_update_pool(req);
1552         }
1553
1554         /*
1555          * Store transno in reqmsg for replay.
1556          */
1557         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1558                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1559                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1560         }
1561
1562         if (imp->imp_replayable) {
1563                 spin_lock(&imp->imp_lock);
1564                 /*
1565                  * No point in adding already-committed requests to the replay
1566                  * list; we would just remove them immediately. b=9829
1567                  */
1568                 if (req->rq_transno != 0 &&
1569                     (req->rq_transno >
1570                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1571                      req->rq_replay)) {
1572                         /** version recovery */
1573                         ptlrpc_save_versions(req);
1574                         ptlrpc_retain_replayable_request(req, imp);
1575                 } else if (req->rq_commit_cb &&
1576                            list_empty(&req->rq_replay_list)) {
1577                         /*
1578                          * NB: don't call rq_commit_cb if it's already on
1579                          * rq_replay_list, ptlrpc_free_committed() will call
1580                          * it later, see LU-3618 for details
1581                          */
1582                         spin_unlock(&imp->imp_lock);
1583                         req->rq_commit_cb(req);
1584                         spin_lock(&imp->imp_lock);
1585                 }
1586
1587                 /*
1588                  * Replay-enabled imports return commit-status information.
1589                  */
1590                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1591                 if (likely(committed > imp->imp_peer_committed_transno))
1592                         imp->imp_peer_committed_transno = committed;
1593
1594                 ptlrpc_free_committed(imp);
1595
1596                 if (!list_empty(&imp->imp_replay_list)) {
1597                         struct ptlrpc_request *last;
1598
1599                         last = list_entry(imp->imp_replay_list.prev,
1600                                           struct ptlrpc_request,
1601                                           rq_replay_list);
1602                         /*
1603                          * Requests with rq_replay stay on the list even if no
1604                          * commit is expected.
1605                          */
1606                         if (last->rq_transno > imp->imp_peer_committed_transno)
1607                                 ptlrpc_pinger_commit_expected(imp);
1608                 }
1609
1610                 spin_unlock(&imp->imp_lock);
1611         }
1612
1613         RETURN(rc);
1614 }
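/*
 * To illustrate the replay bookkeeping above: if this request's transno is
 * 1005 and the reply reports last_committed = 1000, the request is retained
 * for replay; once a later reply reports last_committed >= 1005,
 * ptlrpc_free_committed() can drop it from the replay list.
 */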
1615
1616 /**
1617  * Helper function to send request \a req over the network for the first time.
1618  * Also adjusts the request phase.
1619  * Returns 0 on success or error code.
1620  */
1621 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1622 {
1623         struct obd_import *imp = req->rq_import;
1624         __u64 min_xid = 0;
1625         int rc;
1626
1627         ENTRY;
1628         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1629
1630         /* do not try to go further if there is not enough memory in enc_pool */
1631         if (req->rq_sent && req->rq_bulk)
1632                 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1633                     pool_is_at_full_capacity())
1634                         RETURN(-ENOMEM);
1635
1636         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1637             (!req->rq_generation_set ||
1638              req->rq_import_generation == imp->imp_generation))
1639                 RETURN(0);
1640
1641         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1642
1643         spin_lock(&imp->imp_lock);
1644
1645         LASSERT(req->rq_xid != 0);
1646         LASSERT(!list_empty(&req->rq_unreplied_list));
1647
1648         if (!req->rq_generation_set)
1649                 req->rq_import_generation = imp->imp_generation;
1650
1651         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1652                 spin_lock(&req->rq_lock);
1653                 req->rq_waiting = 1;
1654                 spin_unlock(&req->rq_lock);
1655
1656                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1657                           ptlrpc_import_state_name(req->rq_send_state),
1658                           ptlrpc_import_state_name(imp->imp_state));
1659                 LASSERT(list_empty(&req->rq_list));
1660                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1661                 atomic_inc(&req->rq_import->imp_inflight);
1662                 spin_unlock(&imp->imp_lock);
1663                 RETURN(0);
1664         }
1665
1666         if (rc != 0) {
1667                 spin_unlock(&imp->imp_lock);
1668                 req->rq_status = rc;
1669                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1670                 RETURN(rc);
1671         }
1672
1673         LASSERT(list_empty(&req->rq_list));
1674         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1675         atomic_inc(&req->rq_import->imp_inflight);
1676
1677         /*
1678          * Find the known replied XID from the unreplied list. CONNECT
1679          * and DISCONNECT requests are skipped to keep the sanity check
1680          * on the server side happy; see process_req_last_xid().
1681          *
1682          * For CONNECT: Because replay requests have lower XIDs, it would
1683          * break the sanity check if CONNECT bumped the exp_last_xid on the
1684          * server.
1685          *
1686          * For DISCONNECT: Since the client aborts inflight RPCs before
1687          * sending DISCONNECT, DISCONNECT may carry an XID higher than
1688          * those of the inflight RPCs.
1689          */
1690         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1691                 min_xid = ptlrpc_known_replied_xid(imp);
1692         spin_unlock(&imp->imp_lock);
1693
1694         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1695
1696         lustre_msg_set_status(req->rq_reqmsg, current->pid);
1697
1698         /* If the request to be sent is an LDLM callback, do not try to
1699          * refresh context.
1700          * An LDLM callback is sent by a server to a client in order to make
1701          * it release a lock, on a communication channel that uses a reverse
1702          * context. It cannot be refreshed on its own, as it is the 'reverse'
1703          * (server-side) representation of a client context.
1704          * We do not care if the reverse context is expired, and want to send
1705          * the LDLM callback anyway. Once the client receives the AST, it is
1706          * its job to refresh its own context if it has expired, hence
1707          * refreshing the associated reverse context on server side, before
1708          * being able to send the LDLM_CANCEL requested by the server.
1709          */
1710         if (lustre_msg_get_opc(req->rq_reqmsg) != LDLM_BL_CALLBACK &&
1711             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_CP_CALLBACK &&
1712             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_GL_CALLBACK)
1713                 rc = sptlrpc_req_refresh_ctx(req, 0);
1714         if (rc) {
1715                 if (req->rq_err) {
1716                         req->rq_status = rc;
1717                         RETURN(1);
1718                 } else {
1719                         spin_lock(&req->rq_lock);
1720                         req->rq_wait_ctx = 1;
1721                         spin_unlock(&req->rq_lock);
1722                         RETURN(0);
1723                 }
1724         }
1725
1726         CDEBUG(D_RPCTRACE,
1727                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1728                req, current->comm,
1729                imp->imp_obd->obd_uuid.uuid,
1730                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1731                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1732                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1733
1734         rc = ptl_send_rpc(req, 0);
1735         if (rc == -ENOMEM) {
1736                 spin_lock(&imp->imp_lock);
1737                 if (!list_empty(&req->rq_list)) {
1738                         list_del_init(&req->rq_list);
1739                         if (atomic_dec_and_test(&req->rq_import->imp_inflight))
1740                                 wake_up(&req->rq_import->imp_recovery_waitq);
1741                 }
1742                 spin_unlock(&imp->imp_lock);
1743                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1744                 RETURN(rc);
1745         }
1746         if (rc) {
1747                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1748                           rc);
1749                 spin_lock(&req->rq_lock);
1750                 req->rq_net_err = 1;
1751                 spin_unlock(&req->rq_lock);
1752                 RETURN(rc);
1753         }
1754         RETURN(0);
1755 }
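/*
 * Note: on -ENOMEM the request is moved back to RQ_PHASE_NEW above, so a
 * later ptlrpc_check_set() pass will retry ptlrpc_send_new_req() for it once
 * resources are available.
 */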
1756
1757 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1758 {
1759         int remaining, rc;
1760
1761         ENTRY;
1762         LASSERT(set->set_producer != NULL);
1763
1764         remaining = atomic_read(&set->set_remaining);
1765
1766         /*
1767          * populate the ->set_requests list with requests until we
1768          * reach the maximum number of RPCs in flight for this set
1769          */
1770         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1771                 rc = set->set_producer(set, set->set_producer_arg);
1772                 if (rc == -ENOENT) {
1773                         /* no more RPC to produce */
1774                         set->set_producer     = NULL;
1775                         set->set_producer_arg = NULL;
1776                         RETURN(0);
1777                 }
1778         }
1779
1780         RETURN((atomic_read(&set->set_remaining) - remaining));
1781 }
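/*
 * A minimal sketch of a producer callback as consumed by
 * ptlrpc_set_producer() above.  The callback is invoked until
 * set_max_inflight requests are in flight and signals "no more work" by
 * returning -ENOENT; the names example_producer, example_work and
 * example_next_request are hypothetical and not part of this file.
 */
#if 0
static int example_producer(struct ptlrpc_request_set *set, void *arg)
{
        struct example_work *work = arg;        /* hypothetical caller state */
        struct ptlrpc_request *req;

        /* hypothetical helper returning the next prepared request, if any */
        req = example_next_request(work);
        if (req == NULL)
                return -ENOENT;                 /* nothing left to produce */

        ptlrpc_set_add_req(set, req);
        return 0;
}
#endif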
1782
1783 /**
1784  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1785  * and no more replies are expected.
1786  * (It is possible to get fewer replies than requests sent, e.g. due to
1787  * timed out requests or requests that we had trouble sending out.)
1788  *
1789  * NOTE: This function contains a potential schedule point (cond_resched()).
1790  */
1791 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1792 {
1793         struct ptlrpc_request *req, *next;
1794         LIST_HEAD(comp_reqs);
1795         int force_timer_recalc = 0;
1796
1797         ENTRY;
1798         if (atomic_read(&set->set_remaining) == 0)
1799                 RETURN(1);
1800
1801         list_for_each_entry_safe(req, next, &set->set_requests,
1802                                  rq_set_chain) {
1803                 struct obd_import *imp = req->rq_import;
1804                 int unregistered = 0;
1805                 int async = 1;
1806                 int rc = 0;
1807
1808                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1809                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1810                         continue;
1811                 }
1812
1813                 /*
1814                  * This schedule point is mainly for the ptlrpcd caller of this
1815                  * function.  Most ptlrpc sets are neither long-lived nor
1816                  * unbounded in length, but the set used by ptlrpcd is.
1817                  * Since the processing time is unbounded, we need to insert an
1818                  * explicit schedule point to make the thread well-behaved.
1819                  */
1820                 cond_resched();
1821
1822                 /*
1823                  * If the caller allows the request to be interrupted and it
1824                  * has in fact been interrupted, then move the request to the
1825                  * RQ_PHASE_INTERPRET phase regardless of what the current
1826                  * phase is.
1827                  */
1828                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1829                         req->rq_status = -EINTR;
1830                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1831
1832                         /*
1833                          * Since the request is being interpreted and we have
1834                          * to wait for the reply to be unlinked, use sync mode.
1835                          */
1836                         async = 0;
1837
1838                         GOTO(interpret, req->rq_status);
1839                 }
1840
1841                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1842                         force_timer_recalc = 1;
1843
1844                 /* delayed send - skip */
1845                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1846                         continue;
1847
1848                 /* delayed resend - skip */
1849                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1850                     req->rq_sent > ktime_get_real_seconds())
1851                         continue;
1852
1853                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1854                       req->rq_phase == RQ_PHASE_BULK ||
1855                       req->rq_phase == RQ_PHASE_INTERPRET ||
1856                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1857                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1858                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1859                         LBUG();
1860                 }
1861
1862                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1863                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1864                         LASSERT(req->rq_next_phase != req->rq_phase);
1865                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1866
1867                         if (req->rq_req_deadline &&
1868                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1869                                 req->rq_req_deadline = 0;
1870                         if (req->rq_reply_deadline &&
1871                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1872                                 req->rq_reply_deadline = 0;
1873                         if (req->rq_bulk_deadline &&
1874                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1875                                 req->rq_bulk_deadline = 0;
1876
1877                         /*
1878                          * Skip processing until the reply is unlinked. We
1879                          * can't return the request to the pool or call
1880                          * interpret before that. We need to make sure that
1881                          * all RDMA transfers have finished and will not
1882                          * corrupt any data.
1883                          */
1884                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1885                             ptlrpc_cli_wait_unlink(req))
1886                                 continue;
1887                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1888                             ptlrpc_client_bulk_active(req))
1889                                 continue;
1890
1891                         /*
1892                          * Turn fail_loc off to prevent it from looping
1893                          * forever.
1894                          */
1895                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1896                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1897                                                      OBD_FAIL_ONCE);
1898                         }
1899                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1900                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1901                                                      OBD_FAIL_ONCE);
1902                         }
1903
1904                         /*
1905                          * Move to next phase if reply was successfully
1906                          * unlinked.
1907                          */
1908                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1909                 }
1910
1911                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1912                         GOTO(interpret, req->rq_status);
1913
1914                 /*
1915                  * Note that this will also start the async reply unlink.
1916                  */
1917                 if (req->rq_net_err && !req->rq_timedout) {
1918                         ptlrpc_expire_one_request(req, 1);
1919
1920                         /*
1921                          * Check if we still need to wait for unlink.
1922                          */
1923                         if (ptlrpc_cli_wait_unlink(req) ||
1924                             ptlrpc_client_bulk_active(req))
1925                                 continue;
1926                         /* If there is no need to resend, fail it now. */
1927                         if (req->rq_no_resend) {
1928                                 if (req->rq_status == 0)
1929                                         req->rq_status = -EIO;
1930                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1931                                 GOTO(interpret, req->rq_status);
1932                         } else {
1933                                 continue;
1934                         }
1935                 }
1936
1937                 if (req->rq_err) {
1938                         if (!ptlrpc_unregister_reply(req, 1)) {
1939                                 ptlrpc_unregister_bulk(req, 1);
1940                                 continue;
1941                         }
1942
1943                         spin_lock(&req->rq_lock);
1944                         req->rq_replied = 0;
1945                         spin_unlock(&req->rq_lock);
1946                         if (req->rq_status == 0)
1947                                 req->rq_status = -EIO;
1948                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1949                         GOTO(interpret, req->rq_status);
1950                 }
1951
1952                 /*
1953                  * ptlrpc_set_wait uses l_wait_event_abortable_timeout()
1954                  * so it sets rq_intr regardless of individual rpc
1955                  * timeouts. The synchronous IO waiting path sets
1956                  * rq_intr irrespective of whether ptlrpcd
1957                  * has seen a timeout.  Our policy is to only interpret
1958                  * interrupted rpcs after they have timed out, so we
1959                  * need to enforce that here.
1960                  */
1961
1962                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1963                                      req->rq_wait_ctx)) {
1964                         req->rq_status = -EINTR;
1965                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1966                         GOTO(interpret, req->rq_status);
1967                 }
1968
1969                 if (req->rq_phase == RQ_PHASE_RPC) {
1970                         if (req->rq_timedout || req->rq_resend ||
1971                             req->rq_waiting || req->rq_wait_ctx) {
1972                                 int status;
1973
1974                                 if (!ptlrpc_unregister_reply(req, 1)) {
1975                                         ptlrpc_unregister_bulk(req, 1);
1976                                         continue;
1977                                 }
1978
1979                                 spin_lock(&imp->imp_lock);
1980                                 if (ptlrpc_import_delay_req(imp, req,
1981                                                             &status)) {
1982                                         /*
1983                                          * put on the delayed list while we
1984                                          * wait for recovery to finish, before sending
1985                                          */
1986                                         list_move_tail(&req->rq_list,
1987                                                        &imp->imp_delayed_list);
1988                                         spin_unlock(&imp->imp_lock);
1989                                         continue;
1990                                 }
1991
1992                                 if (status != 0)  {
1993                                         req->rq_status = status;
1994                                         ptlrpc_rqphase_move(req,
1995                                                             RQ_PHASE_INTERPRET);
1996                                         spin_unlock(&imp->imp_lock);
1997                                         GOTO(interpret, req->rq_status);
1998                                 }
1999                                 /* ignore on just initiated connections */
2000                                 if (ptlrpc_no_resend(req) &&
2001                                     !req->rq_wait_ctx &&
2002                                     imp->imp_generation !=
2003                                     imp->imp_initiated_at) {
2004                                         req->rq_status = -ENOTCONN;
2005                                         ptlrpc_rqphase_move(req,
2006                                                             RQ_PHASE_INTERPRET);
2007                                         spin_unlock(&imp->imp_lock);
2008                                         GOTO(interpret, req->rq_status);
2009                                 }
2010
2011                                 /* don't resend too fast in case of network
2012                                  * errors.
2013                                  */
2014                                 if (ktime_get_real_seconds() < (req->rq_sent + 1)
2015                                     && req->rq_net_err && req->rq_timedout) {
2016
2017                                         DEBUG_REQ(D_INFO, req,
2018                                                   "throttle request");
2019                                         /* Don't try to resend RPC right away
2020                                          * as it is likely it will fail again
2021                                          * and ptlrpc_check_set() will be
2022                                          * called again, keeping this thread
2023                                          * busy. Instead, wait for the next
2024                                          * timeout. Flag it as resend to
2025                                          * ensure we don't wait too long.
2026                                          */
2027                                         req->rq_resend = 1;
2028                                         spin_unlock(&imp->imp_lock);
2029                                         continue;
2030                                 }
2031
2032                                 list_move_tail(&req->rq_list,
2033                                                &imp->imp_sending_list);
2034
2035                                 spin_unlock(&imp->imp_lock);
2036
2037                                 spin_lock(&req->rq_lock);
2038                                 req->rq_waiting = 0;
2039                                 spin_unlock(&req->rq_lock);
2040
2041                                 if (req->rq_timedout || req->rq_resend) {
2042                                         /*
2043                                          * This is re-sending anyway,
2044                                          * let's mark req as resend.
2045                                          */
2046                                         spin_lock(&req->rq_lock);
2047                                         req->rq_resend = 1;
2048                                         spin_unlock(&req->rq_lock);
2049                                 }
2050                                 /*
2051                                  * rq_wait_ctx is only touched by ptlrpcd,
2052                                  * so no lock is needed here.
2053                                  */
2054                                 status = sptlrpc_req_refresh_ctx(req, 0);
2055                                 if (status) {
2056                                         if (req->rq_err) {
2057                                                 req->rq_status = status;
2058                                                 spin_lock(&req->rq_lock);
2059                                                 req->rq_wait_ctx = 0;
2060                                                 spin_unlock(&req->rq_lock);
2061                                                 force_timer_recalc = 1;
2062                                         } else {
2063                                                 spin_lock(&req->rq_lock);
2064                                                 req->rq_wait_ctx = 1;
2065                                                 spin_unlock(&req->rq_lock);
2066                                         }
2067
2068                                         continue;
2069                                 } else {
2070                                         spin_lock(&req->rq_lock);
2071                                         req->rq_wait_ctx = 0;
2072                                         spin_unlock(&req->rq_lock);
2073                                 }
2074
2075                                 /*
2076                          * In any case, the previous bulk should be
2077                          * cleaned up to prepare for the new send
2078                                  */
2079                                 if (req->rq_bulk &&
2080                                     !ptlrpc_unregister_bulk(req, 1))
2081                                         continue;
2082
2083                                 rc = ptl_send_rpc(req, 0);
2084                                 if (rc == -ENOMEM) {
2085                                         spin_lock(&imp->imp_lock);
2086                                         if (!list_empty(&req->rq_list))
2087                                                 list_del_init(&req->rq_list);
2088                                         spin_unlock(&imp->imp_lock);
2089                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2090                                         continue;
2091                                 }
2092                                 if (rc) {
2093                                         DEBUG_REQ(D_HA, req,
2094                                                   "send failed: rc = %d", rc);
2095                                         force_timer_recalc = 1;
2096                                         spin_lock(&req->rq_lock);
2097                                         req->rq_net_err = 1;
2098                                         spin_unlock(&req->rq_lock);
2099                                         continue;
2100                                 }
2101                                 /* need to reset the timeout */
2102                                 force_timer_recalc = 1;
2103                         }
2104
2105                         spin_lock(&req->rq_lock);
2106
2107                         if (ptlrpc_client_early(req)) {
2108                                 ptlrpc_at_recv_early_reply(req);
2109                                 spin_unlock(&req->rq_lock);
2110                                 continue;
2111                         }
2112
2113                         /* Still waiting for a reply? */
2114                         if (ptlrpc_client_recv(req)) {
2115                                 spin_unlock(&req->rq_lock);
2116                                 continue;
2117                         }
2118
2119                         /* Did we actually receive a reply? */
2120                         if (!ptlrpc_client_replied(req)) {
2121                                 spin_unlock(&req->rq_lock);
2122                                 continue;
2123                         }
2124
2125                         spin_unlock(&req->rq_lock);
2126
2127                         /*
2128                          * unlink from the net because we are going to swab
2129                          * the reply buffer in place
2130                          */
2131                         unregistered = ptlrpc_unregister_reply(req, 1);
2132                         if (!unregistered)
2133                                 continue;
2134
2135                         req->rq_status = after_reply(req);
2136                         if (req->rq_resend) {
2137                                 force_timer_recalc = 1;
2138                                 continue;
2139                         }
2140
2141                         /*
2142                          * If there is no bulk associated with this request,
2143                          * then we're done and should let the interpreter
2144                          * process the reply. The same holds if the RPC returned
2145                          * an error, in which case the bulk will never arrive.
2146                          */
2147                         if (!req->rq_bulk || req->rq_status < 0) {
2148                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2149                                 GOTO(interpret, req->rq_status);
2150                         }
2151
2152                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2153                 }
2154
2155                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2156                 if (ptlrpc_client_bulk_active(req))
2157                         continue;
2158
2159                 if (req->rq_bulk->bd_failure) {
2160                         /*
2161                          * The RPC reply arrived OK, but the bulk screwed
2162                          * up!  Dead weird since the server told us the RPC
2163                          * was good after getting the REPLY for her GET or
2164                          * the ACK for her PUT.
2165                          */
2166                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed %d/%d/%d",
2167                                   req->rq_status,
2168                                   req->rq_bulk->bd_nob,
2169                                   req->rq_bulk->bd_nob_transferred);
2170                         req->rq_status = -EIO;
2171                 }
2172
2173                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2174
2175 interpret:
2176                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2177
2178                 /*
2179                  * This moves the request to the "unregistering" phase; we
2180                  * need to wait for the reply unlink.
2181                  */
2182                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2183                         /* start async bulk unlink too */
2184                         ptlrpc_unregister_bulk(req, 1);
2185                         continue;
2186                 }
2187
2188                 if (!ptlrpc_unregister_bulk(req, async))
2189                         continue;
2190
2191                 /*
2192                  * By the time interpret is called, receiving must already
2193                  * have finished.
2194                  */
2195                 LASSERT(!req->rq_receiving_reply);
2196
2197                 ptlrpc_req_interpret(env, req, req->rq_status);
2198
2199                 if (ptlrpcd_check_work(req)) {
2200                         atomic_dec(&set->set_remaining);
2201                         continue;
2202                 }
2203                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2204
2205                 if (req->rq_reqmsg)
2206                         CDEBUG(D_RPCTRACE,
2207                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2208                                req, current->comm,
2209                                imp->imp_obd->obd_uuid.uuid,
2210                                lustre_msg_get_status(req->rq_reqmsg),
2211                                req->rq_xid,
2212                                obd_import_nid2str(imp),
2213                                lustre_msg_get_opc(req->rq_reqmsg),
2214                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2215
2216                 spin_lock(&imp->imp_lock);
2217                 /*
2218                  * The request may no longer be on the sending or delayed
2219                  * list. This can happen when it was marked erroneous because
2220                  * ptlrpc_import_delay_req(req, status) found it impossible to
2221                  * allow sending this RPC and returned *status != 0.
2222                  */
2223                 if (!list_empty(&req->rq_list)) {
2224                         list_del_init(&req->rq_list);
2225                         if (atomic_dec_and_test(&imp->imp_inflight))
2226                                 wake_up(&imp->imp_recovery_waitq);
2227                 }
2228                 list_del_init(&req->rq_unreplied_list);
2229                 spin_unlock(&imp->imp_lock);
2230
2231                 atomic_dec(&set->set_remaining);
2232                 wake_up(&imp->imp_recovery_waitq);
2233
2234                 if (set->set_producer) {
2235                         /* produce a new request if possible */
2236                         if (ptlrpc_set_producer(set) > 0)
2237                                 force_timer_recalc = 1;
2238
2239                         /*
2240                          * free the request that has just been completed
2241                          * in order not to pollute set->set_requests
2242                          */
2243                         list_del_init(&req->rq_set_chain);
2244                         spin_lock(&req->rq_lock);
2245                         req->rq_set = NULL;
2246                         req->rq_invalid_rqset = 0;
2247                         spin_unlock(&req->rq_lock);
2248
2249                         /* record rq_status to compute the final status later */
2250                         if (req->rq_status != 0)
2251                                 set->set_rc = req->rq_status;
2252                         ptlrpc_req_finished(req);
2253                 } else {
2254                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2255                 }
2256         }
2257
2258         /*
2259          * move completed requests to the head of the list so it's easier
2260          * for the caller to find them
2261          */
2262         list_splice(&comp_reqs, &set->set_requests);
2263
2264         /* If we hit an error, we want to recover promptly. */
2265         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2266 }
2267 EXPORT_SYMBOL(ptlrpc_check_set);
2268
2269 /**
2270  * Time out request \a req. If \a async_unlink is set, do not wait
2271  * until LNet actually confirms network buffer unlinking.
2272  * Return 1 if we should give up further retry attempts, 0 otherwise.
2273  */
2274 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2275 {
2276         struct obd_import *imp = req->rq_import;
2277         unsigned int debug_mask = D_RPCTRACE;
2278         int rc = 0;
2279
2280         ENTRY;
2281         spin_lock(&req->rq_lock);
2282         req->rq_timedout = 1;
2283         spin_unlock(&req->rq_lock);
2284
2285         if (ptlrpc_console_allow(req, lustre_msg_get_opc(req->rq_reqmsg),
2286                                  lustre_msg_get_status(req->rq_reqmsg)))
2287                 debug_mask = D_WARNING;
2288         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2289                   req->rq_net_err ? "failed due to network error" :
2290                      ((req->rq_real_sent == 0 ||
2291                        req->rq_real_sent < req->rq_sent ||
2292                        req->rq_real_sent >= req->rq_deadline) ?
2293                       "timed out for sent delay" : "timed out for slow reply"),
2294                   req->rq_sent, req->rq_real_sent);
2295
2296         if (imp && obd_debug_peer_on_timeout)
2297                 LNetDebugPeer(&imp->imp_connection->c_peer);
2298
2299         ptlrpc_unregister_reply(req, async_unlink);
2300         ptlrpc_unregister_bulk(req, async_unlink);
2301
2302         if (obd_dump_on_timeout)
2303                 libcfs_debug_dumplog();
2304
2305         if (!imp) {
2306                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2307                 RETURN(1);
2308         }
2309
2310         atomic_inc(&imp->imp_timeouts);
2311
2312         /* The DLM server doesn't want recovery run on its imports. */
2313         if (imp->imp_dlm_fake)
2314                 RETURN(1);
2315
2316         /*
2317          * If this request is for recovery or other primordial tasks,
2318          * then error it out here.
2319          */
2320         if (req->rq_ctx_init || req->rq_ctx_fini ||
2321             req->rq_send_state != LUSTRE_IMP_FULL ||
2322             imp->imp_obd->obd_no_recov) {
2323                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2324                           ptlrpc_import_state_name(req->rq_send_state),
2325                           ptlrpc_import_state_name(imp->imp_state));
2326                 spin_lock(&req->rq_lock);
2327                 req->rq_status = -ETIMEDOUT;
2328                 req->rq_err = 1;
2329                 spin_unlock(&req->rq_lock);
2330                 RETURN(1);
2331         }
2332
2333         /*
2334          * if a request can't be resent we can't wait for an answer after
2335          * the timeout
2336          */
2337         if (ptlrpc_no_resend(req)) {
2338                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2339                 rc = 1;
2340         }
2341
2342         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
2343
2344         RETURN(rc);
2345 }
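/*
 * Summary of the return value above: 1 (give up retrying) when the import is
 * gone or fake, when the request was sent outside LUSTRE_IMP_FULL or with
 * recovery disabled, or when the request cannot be resent; otherwise
 * ptlrpc_fail_import() has been asked to start recovery and 0 tells the
 * caller to keep waiting for the resend.
 */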
2346
2347 /**
2348  * Time out all uncompleted requests in request set \a set.
2349  * This is called when a wait times out.
2350  */
2351 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
2352 {
2353         struct ptlrpc_request *req;
2354         time64_t now = ktime_get_real_seconds();
2355
2356         ENTRY;
2357         LASSERT(set != NULL);
2358
2359         /*
2360          * A timeout expired. See which reqs it applies to...
2361          */
2362         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2363                 /* don't expire request waiting for context */
2364                 if (req->rq_wait_ctx)
2365                         continue;
2366
2367                 /* Request in-flight? */
2368                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2369                        !req->rq_waiting && !req->rq_resend) ||
2370                       (req->rq_phase == RQ_PHASE_BULK)))
2371                         continue;
2372
2373                 if (req->rq_timedout ||     /* already dealt with */
2374                     req->rq_deadline > now) /* not expired */
2375                         continue;
2376
2377                 /*
2378                  * Deal with this guy. Do it asynchronously so as not to
2379                  * block the ptlrpcd thread.
2380                  */
2381                 ptlrpc_expire_one_request(req, 1);
2382                 /*
2383                  * Loops require that we resched once in a while to avoid
2384                  * RCU stalls and a few other problems.
2385                  */
2386                 cond_resched();
2387
2388         }
2389 }
2390
2391 /**
2392  * Interrupts (sets the interrupted flag on) all uncompleted requests
2393  * in set \a set. This is called when a wait_event is interrupted
2394  * by a signal.
2395  */
2396 static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
2397 {
2398         struct ptlrpc_request *req;
2399
2400         LASSERT(set != NULL);
2401         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2402
2403         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2404                 if (req->rq_intr)
2405                         continue;
2406
2407                 if (req->rq_phase != RQ_PHASE_RPC &&
2408                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2409                     !req->rq_allow_intr)
2410                         continue;
2411
2412                 spin_lock(&req->rq_lock);
2413                 req->rq_intr = 1;
2414                 spin_unlock(&req->rq_lock);
2415         }
2416 }
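/*
 * Note: only requests in the RPC or UNREG_RPC phases (or with rq_allow_intr
 * set) are flagged above; the flag itself does not abort anything, the
 * interrupted requests are failed with -EINTR on the next ptlrpc_check_set()
 * pass.
 */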
2417
2418 /**
2419  * Get the smallest timeout in the set; this does NOT set a timeout.
2420  */
2421 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2422 {
2423         time64_t now = ktime_get_real_seconds();
2424         int timeout = 0;
2425         struct ptlrpc_request *req;
2426         time64_t deadline;
2427
2428         ENTRY;
2429         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2430                 /* Request in-flight? */
2431                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2432                       (req->rq_phase == RQ_PHASE_BULK) ||
2433                       (req->rq_phase == RQ_PHASE_NEW)))
2434                         continue;
2435
2436                 /* Already timed out. */
2437                 if (req->rq_timedout)
2438                         continue;
2439
2440                 /* Waiting for ctx. */
2441                 if (req->rq_wait_ctx)
2442                         continue;
2443
2444                 if (req->rq_phase == RQ_PHASE_NEW)
2445                         deadline = req->rq_sent;
2446                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2447                         deadline = req->rq_sent;
2448                 else
2449                         deadline = req->rq_sent + req->rq_timeout;
2450
2451                 if (deadline <= now)    /* actually expired already */
2452                         timeout = 1;    /* ASAP */
2453                 else if (timeout == 0 || timeout > deadline - now)
2454                         timeout = deadline - now;
2455         }
2456         RETURN(timeout);
2457 }
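/*
 * For example, a request sent at t = 100s with rq_timeout = 30s contributes
 * a deadline of 130s; if now = 120s the smallest such deadline yields a
 * return value of 10, while an already-expired request forces a return of 1
 * so the caller re-checks the set as soon as possible.
 */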
2458
2459 /**
2460  * Send all unsent requests from the set and then wait until all
2461  * requests in the set complete (either get a reply, time out, get an
2462  * error, or are otherwise interrupted).
2463  * Returns 0 on success or error code otherwise.
2464  */
2465 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2466 {
2467         struct ptlrpc_request *req;
2468         time64_t timeout;
2469         int rc;
2470
2471         ENTRY;
2472         if (set->set_producer)
2473                 (void)ptlrpc_set_producer(set);
2474         else
2475                 list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2476                         if (req->rq_phase == RQ_PHASE_NEW)
2477                                 (void)ptlrpc_send_new_req(req);
2478                 }
2479
2480         if (list_empty(&set->set_requests))
2481                 RETURN(0);
2482
2483         do {
2484                 timeout = ptlrpc_set_next_timeout(set);
2485
2486                 /*
2487                  * wait until all complete, are interrupted, or an in-flight
2488                  * req times out
2489                  */
2490                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2491                        set, timeout);
2492
2493                 if ((timeout == 0 && !signal_pending(current)) ||
2494                     set->set_allow_intr) {
2495                         /*
2496                          * No requests are in-flight (either timed out
2497                          * or delayed), so we can allow interrupts.
2498                          * We still want to block for a limited time,
2499                          * so we allow interrupts during the timeout.
2500                          */
2501                         rc = l_wait_event_abortable_timeout(
2502                                 set->set_waitq,
2503                                 ptlrpc_check_set(NULL, set),
2504                                 cfs_time_seconds(timeout ? timeout : 1));
2505                         if (rc == 0) {
2506                                 rc = -ETIMEDOUT;
2507                                 ptlrpc_expired_set(set);
2508                         } else if (rc < 0) {
2509                                 rc = -EINTR;
2510                                 ptlrpc_interrupted_set(set);
2511                         } else {
2512                                 rc = 0;
2513                         }
2514                 } else {
2515                         /*
2516                          * At least one request is in flight, so no
2517                          * interrupts are allowed. Wait until all
2518                          * complete, or an in-flight req times out.
2519                          */
2520                         rc = wait_event_idle_timeout(
2521                                 set->set_waitq,
2522                                 ptlrpc_check_set(NULL, set),
2523                                 cfs_time_seconds(timeout ? timeout : 1));
2524                         if (rc == 0) {
2525                                 ptlrpc_expired_set(set);
2526                                 rc = -ETIMEDOUT;
2527                         } else {
2528                                 rc = 0;
2529                         }
2530
2531                         /*
2532                          * LU-769 - if we ignored the signal because
2533                          * it was already pending when we started, we
2534                          * need to handle it now or we risk it being
2535                          * ignored forever
2536                          */
2537                         if (rc == -ETIMEDOUT &&
2538                             signal_pending(current)) {
2539                                 sigset_t old, new;
2540
2541                                 siginitset(&new, LUSTRE_FATAL_SIGS);
2542                                 sigprocmask(SIG_BLOCK, &new, &old);
2543                                 /*
2544                                  * In fact we only interrupt for the
2545                                  * "fatal" signals like SIGINT or
2546                                  * SIGKILL. We still ignore less
2547                                  * important signals since a ptlrpc set
2548                                  * is not easily reentrant from
2549                                  * userspace
2550                                  */
2551                                 if (signal_pending(current))
2552                                         ptlrpc_interrupted_set(set);
2553                                 sigprocmask(SIG_SETMASK, &old, NULL);
2554                         }
2555                 }
2556
2557                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2558
2559                 /*
2560                  * -EINTR => all requests have been flagged rq_intr so next
2561                  * check completes.
2562                  * -ETIMEDOUT => someone timed out.  When all reqs have
2563                  * timed out, signals are enabled allowing completion with
2564                  * EINTR.
2565                  * I don't really care if we go once more round the loop in
2566                  * the error cases -eeb.
2567                  */
2568                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2569                         list_for_each_entry(req, &set->set_requests,
2570                                             rq_set_chain) {
2571                                 spin_lock(&req->rq_lock);
2572                                 req->rq_invalid_rqset = 1;
2573                                 spin_unlock(&req->rq_lock);
2574                         }
2575                 }
2576         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2577
2578         LASSERT(atomic_read(&set->set_remaining) == 0);
2579
2580         rc = set->set_rc; /* rq_status of already freed requests if any */
2581         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2582                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2583                 if (req->rq_status != 0)
2584                         rc = req->rq_status;
2585         }
2586
2587         RETURN(rc);
2588 }
2589 EXPORT_SYMBOL(ptlrpc_set_wait);
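/*
 * A minimal usage sketch for the synchronous set interface exported above,
 * assuming the ptlrpc_set_create()/ptlrpc_set_add_req()/ptlrpc_set_destroy()
 * helpers defined elsewhere in this file and a request prepared by the
 * caller; example_send_and_wait is a hypothetical name.
 */
#if 0
static int example_send_and_wait(const struct lu_env *env,
                                 struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set;
        int rc;

        set = ptlrpc_set_create();
        if (set == NULL)
                return -ENOMEM;

        /* the set takes care of sending the request and collecting status */
        ptlrpc_set_add_req(set, req);
        rc = ptlrpc_set_wait(env, set);

        /* drops the set's reference on each request before freeing the set */
        ptlrpc_set_destroy(set);
        return rc;
}
#endif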
2590
2591 /**
2592  * Helper function for request freeing.
2593  * Called when the request refcount reaches zero and the request needs to be
2594  * freed. Removes the request from all sending/replay lists it might be on
2595  * and frees network buffers if any are present.
2596  * If \a locked is set, the caller already holds the import imp_lock
2597  * and we do not need to reacquire it (for certain list manipulations).
2598  */
2599 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2600 {
2601         ENTRY;
2602
2603         if (!request)
2604                 RETURN_EXIT;
2605
2606         LASSERT(!request->rq_srv_req);
2607         LASSERT(request->rq_export == NULL);
2608         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2609         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
2610         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
2611         LASSERTF(!request->rq_replay, "req %p\n", request);
2612
2613         req_capsule_fini(&request->rq_pill);
2614
2615         /*
2616          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2617          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2618          */
2619         if (request->rq_import) {
2620                 if (!locked)
2621                         spin_lock(&request->rq_import->imp_lock);
2622                 list_del_init(&request->rq_replay_list);
2623                 list_del_init(&request->rq_unreplied_list);
2624                 if (!locked)
2625                         spin_unlock(&request->rq_import->imp_lock);
2626         }
2627         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
2628
2629         if (atomic_read(&request->rq_refcount) != 0) {
2630                 DEBUG_REQ(D_ERROR, request,
2631                           "freeing request with nonzero refcount");
2632                 LBUG();
2633         }
2634
2635         if (request->rq_repbuf)
2636                 sptlrpc_cli_free_repbuf(request);
2637
2638         if (request->rq_import) {
2639                 if (!ptlrpcd_check_work(request)) {
2640                         LASSERT(atomic_read(&request->rq_import->imp_reqs) > 0);
2641                         atomic_dec(&request->rq_import->imp_reqs);
2642                 }
2643                 class_import_put(request->rq_import);
2644                 request->rq_import = NULL;
2645         }
2646         if (request->rq_bulk)
2647                 ptlrpc_free_bulk(request->rq_bulk);
2648
2649         if (request->rq_reqbuf || request->rq_clrbuf)
2650                 sptlrpc_cli_free_reqbuf(request);
2651
2652         if (request->rq_cli_ctx)
2653                 sptlrpc_req_put_ctx(request, !locked);
2654
2655         if (request->rq_pool)
2656                 __ptlrpc_free_req_to_pool(request);
2657         else
2658                 ptlrpc_request_cache_free(request);
2659         EXIT;
2660 }
2661
2662 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2663 /**
2664  * Drop one request reference. Must be called with import imp_lock held.
2665  * When reference count drops to zero, request is freed.
2666  */
2667 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2668 {
2669         assert_spin_locked(&request->rq_import->imp_lock);
2670         (void)__ptlrpc_req_finished(request, 1);
2671 }
2672
2673 /**
2674  * Helper function.
2675  * Drops one reference count for request \a request.
2676  * \a locked set indicates that the caller holds the import imp_lock.
2677  * Frees the request when the reference count reaches zero.
2678  *
2679  * \retval 1    the request is freed
2680  * \retval 0    some others still hold references on the request
2681  */
2682 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2683 {
2684         int count;
2685
2686         ENTRY;
2687         if (!request)
2688                 RETURN(1);
2689
2690         LASSERT(request != LP_POISON);
2691         LASSERT(request->rq_reqmsg != LP_POISON);
2692
2693         DEBUG_REQ(D_INFO, request, "refcount now %u",
2694                   atomic_read(&request->rq_refcount) - 1);
2695
2696         spin_lock(&request->rq_lock);
2697         count = atomic_dec_return(&request->rq_refcount);
2698         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2699
2700         /*
2701          * For an open RPC, the client does not know the EA size (LOV, ACL,
2702          * and so on) until the reply arrives, so it has to reserve a very
2703          * large reply buffer. Such a buffer is not released until the RPC is
2704          * freed. Since the open RPC is replayable, we need to keep it on the
2705          * replay list until close. If a lot of files are opened concurrently,
2706          * the client may run out of memory.
2707          *
2708          * In fact, it is unnecessary to keep the reply buffer for open replay:
2709          * the related EAs have already been saved via mdc_save_lovea() before
2710          * coming here. So it is safe to free the reply buffer earlier, before
2711          * releasing the RPC, to avoid client OOM. LU-9514
2712          */
2713         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2714                 spin_lock(&request->rq_early_free_lock);
2715                 sptlrpc_cli_free_repbuf(request);
2716                 request->rq_repbuf = NULL;
2717                 request->rq_repbuf_len = 0;
2718                 request->rq_repdata = NULL;
2719                 request->rq_reqdata_len = 0;
2720                 spin_unlock(&request->rq_early_free_lock);
2721         }
2722         spin_unlock(&request->rq_lock);
2723
2724         if (!count)
2725                 __ptlrpc_free_req(request, locked);
2726
2727         RETURN(!count);
2728 }
2729
2730 /**
2731  * Drops one reference count for a request.
2732  */
2733 void ptlrpc_req_finished(struct ptlrpc_request *request)
2734 {
2735         __ptlrpc_req_finished(request, 0);
2736 }
2737 EXPORT_SYMBOL(ptlrpc_req_finished);
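
/*
 * Illustrative sketch (not part of the original source): the reference
 * counting contract around ptlrpc_req_finished().  A caller that hands a
 * request to another context takes an extra reference first and each side
 * drops its own reference when done; the request is only freed once the
 * last reference goes away.  hand_off_to_other_context() below is a
 * hypothetical placeholder.
 *
 *	ptlrpc_request_addref(req);	// extra ref for the other context
 *	hand_off_to_other_context(req);	// that context calls
 *					// ptlrpc_req_finished(req) when done
 *	ptlrpc_req_finished(req);	// drop our own reference; the request
 *					// is freed by __ptlrpc_free_req() only
 *					// when the refcount reaches zero
 */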
2738
2739 /**
2740  * Returns the xid of \a request.
2741  */
2742 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2743 {
2744         return request->rq_xid;
2745 }
2746 EXPORT_SYMBOL(ptlrpc_req_xid);
2747
2748 /**
2749  * Disengage the client's reply buffer from the network
2750  * NB does _NOT_ unregister any client-side bulk.
2751  * IDEMPOTENT, but _not_ safe against concurrent callers.
2752  * The request owner (i.e. the thread doing the I/O) must call...
2753  * Returns 1 if the reply has been unlinked, or 0 if the unlink is still pending.
2754  */
2755 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2756 {
2757         bool discard = false;
2758         /*
2759          * Might sleep.
2760          */
2761         LASSERT(!in_interrupt());
2762
2763         /* Let's setup deadline for reply unlink. */
2764         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2765             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2766                 request->rq_reply_deadline = ktime_get_real_seconds() +
2767                                              PTLRPC_REQ_LONG_UNLINK;
2768
2769         /*
2770          * Nothing left to do.
2771          */
2772         if (!__ptlrpc_cli_wait_unlink(request, &discard))
2773                 RETURN(1);
2774
2775         LNetMDUnlink(request->rq_reply_md_h);
2776
2777         if (discard) /* Discard the request-out callback */
2778                 __LNetMDUnlink(request->rq_req_md_h, discard);
2779
2780         /*
2781          * Let's check it once again.
2782          */
2783         if (!ptlrpc_cli_wait_unlink(request))
2784                 RETURN(1);
2785
2786         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2787         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2788
2789         /*
2790          * Do not wait for unlink to finish.
2791          */
2792         if (async)
2793                 RETURN(0);
2794
2795         /*
2796          * We have to wait_event_idle_timeout() whatever the result, to get
2797          * a chance to run reply_in_callback(), and to make sure we've
2798          * unlinked before returning a req to the pool.
2799          */
2800         for (;;) {
2801                 wait_queue_head_t *wq = (request->rq_set) ?
2802                                         &request->rq_set->set_waitq :
2803                                         &request->rq_reply_waitq;
2804                 int seconds = PTLRPC_REQ_LONG_UNLINK;
2805                 /*
2806                  * Network access will complete in finite time but the HUGE
2807                  * timeout lets us CWARN for visibility of sluggish NALs
2808                  */
2809                 while (seconds > 0 &&
2810                        wait_event_idle_timeout(
2811                                *wq,
2812                                !ptlrpc_cli_wait_unlink(request),
2813                                cfs_time_seconds(1)) == 0)
2814                         seconds -= 1;
2815                 if (seconds > 0) {
2816                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2817                         RETURN(1);
2818                 }
2819
2820                 DEBUG_REQ(D_WARNING, request,
2821                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2822                           request->rq_receiving_reply,
2823                           request->rq_req_unlinked,
2824                           request->rq_reply_unlinked);
2825         }
2826         RETURN(0);
2827 }
2828
2829 static void ptlrpc_free_request(struct ptlrpc_request *req)
2830 {
2831         spin_lock(&req->rq_lock);
2832         req->rq_replay = 0;
2833         spin_unlock(&req->rq_lock);
2834
2835         if (req->rq_commit_cb)
2836                 req->rq_commit_cb(req);
2837         list_del_init(&req->rq_replay_list);
2838
2839         __ptlrpc_req_finished(req, 1);
2840 }
2841
2842 /**
2843  * The request is committed and dropped from the replay list of its import.
2844  */
2845 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2846 {
2847         struct obd_import *imp = req->rq_import;
2848
2849         spin_lock(&imp->imp_lock);
2850         if (list_empty(&req->rq_replay_list)) {
2851                 spin_unlock(&imp->imp_lock);
2852                 return;
2853         }
2854
2855         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2856                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2857                         imp->imp_replay_cursor = req->rq_replay_list.next;
2858                 ptlrpc_free_request(req);
2859         }
2860
2861         spin_unlock(&imp->imp_lock);
2862 }
2863 EXPORT_SYMBOL(ptlrpc_request_committed);
2864
2865 /**
2866  * Iterates through the replay_list on the import and prunes
2867  * all requests that have a transno smaller than last_committed for the
2868  * import and don't have rq_replay set.
2869  * Since requests are sorted in transno order, stops when meeting the first
2870  * transno bigger than last_committed.
2871  * Caller must hold imp->imp_lock.
2872  */
2873 void ptlrpc_free_committed(struct obd_import *imp)
2874 {
2875         struct ptlrpc_request *req, *saved;
2876         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2877         bool skip_committed_list = true;
2878
2879         ENTRY;
2880         LASSERT(imp != NULL);
2881         assert_spin_locked(&imp->imp_lock);
2882
2883         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2884             imp->imp_generation == imp->imp_last_generation_checked) {
2885                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2886                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2887                 RETURN_EXIT;
2888         }
2889         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2890                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2891                imp->imp_generation);
2892
2893         if (imp->imp_generation != imp->imp_last_generation_checked ||
2894             imp->imp_last_transno_checked == 0)
2895                 skip_committed_list = false;
2896
2897         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2898         imp->imp_last_generation_checked = imp->imp_generation;
2899
2900         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2901                                  rq_replay_list) {
2902                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2903                 LASSERT(req != last_req);
2904                 last_req = req;
2905
2906                 if (req->rq_transno == 0) {
2907                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2908                         LBUG();
2909                 }
2910                 if (req->rq_import_generation < imp->imp_generation) {
2911                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2912                         GOTO(free_req, 0);
2913                 }
2914
2915                 /* not yet committed */
2916                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2917                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2918                         break;
2919                 }
2920
2921                 if (req->rq_replay) {
2922                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2923                         list_move_tail(&req->rq_replay_list,
2924                                        &imp->imp_committed_list);
2925                         continue;
2926                 }
2927
2928                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
2929                           imp->imp_peer_committed_transno);
2930 free_req:
2931                 ptlrpc_free_request(req);
2932         }
2933
2934         if (skip_committed_list)
2935                 GOTO(out, 0);
2936
2937         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2938                                  rq_replay_list) {
2939                 LASSERT(req->rq_transno != 0);
2940                 if (req->rq_import_generation < imp->imp_generation ||
2941                     !req->rq_replay) {
2942                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
2943                                   req->rq_import_generation <
2944                                   imp->imp_generation ? "stale" : "closed");
2945
2946                         if (imp->imp_replay_cursor == &req->rq_replay_list)
2947                                 imp->imp_replay_cursor =
2948                                         req->rq_replay_list.next;
2949
2950                         ptlrpc_free_request(req);
2951                 }
2952         }
2953 out:
2954         EXIT;
2955 }
2956
2957 void ptlrpc_cleanup_client(struct obd_import *imp)
2958 {
2959         ENTRY;
2960         EXIT;
2961 }
2962
2963 /**
2964  * Schedule previously sent request for resend.
2965  * For bulk requests we assign a new xid (to avoid problems with
2966  * lost replies and therefore several transfers landing into the same buffer
2967  * from different sending attempts).
2968  */
2969 void ptlrpc_resend_req(struct ptlrpc_request *req)
2970 {
2971         DEBUG_REQ(D_HA, req, "going to resend");
2972         spin_lock(&req->rq_lock);
2973
2974         /*
2975          * Request got a reply but is still linked to the import list.
2976          * Let ptlrpc_check_set() process it.
2977          */
2978         if (ptlrpc_client_replied(req)) {
2979                 spin_unlock(&req->rq_lock);
2980                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2981                 return;
2982         }
2983
2984         req->rq_status = -EAGAIN;
2985
2986         req->rq_resend = 1;
2987         req->rq_net_err = 0;
2988         req->rq_timedout = 0;
2989
2990         ptlrpc_client_wake_req(req);
2991         spin_unlock(&req->rq_lock);
2992 }
2993
2994 /* XXX: this function and rq_status are currently unused */
2995 void ptlrpc_restart_req(struct ptlrpc_request *req)
2996 {
2997         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2998         req->rq_status = -ERESTARTSYS;
2999
3000         spin_lock(&req->rq_lock);
3001         req->rq_restart = 1;
3002         req->rq_timedout = 0;
3003         ptlrpc_client_wake_req(req);
3004         spin_unlock(&req->rq_lock);
3005 }
3006
3007 /**
3008  * Grab an additional reference on request \a req.
3009  */
3010 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
3011 {
3012         ENTRY;
3013         atomic_inc(&req->rq_refcount);
3014         RETURN(req);
3015 }
3016 EXPORT_SYMBOL(ptlrpc_request_addref);
3017
3018 /**
3019  * Add a request to the import's replay_list.
3020  * Must be called under imp_lock.
3021  */
3022 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3023                                       struct obd_import *imp)
3024 {
3025         struct ptlrpc_request *iter;
3026
3027         assert_spin_locked(&imp->imp_lock);
3028
3029         if (req->rq_transno == 0) {
3030                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3031                 LBUG();
3032         }
3033
3034         /*
3035          * clear this for new requests that were resent as well
3036          * as resent replayed requests.
3037          */
3038         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3039
3040         /* don't re-add requests that have been replayed */
3041         if (!list_empty(&req->rq_replay_list))
3042                 return;
3043
3044         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3045
3046         spin_lock(&req->rq_lock);
3047         req->rq_resend = 0;
3048         spin_unlock(&req->rq_lock);
3049
3050         LASSERT(imp->imp_replayable);
3051         /* Balanced in ptlrpc_free_committed, usually. */
3052         ptlrpc_request_addref(req);
3053         list_for_each_entry_reverse(iter, &imp->imp_replay_list,
3054                                     rq_replay_list) {
3055                 /*
3056                  * open a file, or for closes retained to match creating
3057                  * open a file, or for closes retained if to match creating
3058                  * opens, so use req->rq_xid as a secondary key.
3059                  * (See bugs 684, 685, and 428.)
3060                  * XXX no longer needed, but all opens need transnos!
3061                  */
3062                 if (iter->rq_transno > req->rq_transno)
3063                         continue;
3064
3065                 if (iter->rq_transno == req->rq_transno) {
3066                         LASSERT(iter->rq_xid != req->rq_xid);
3067                         if (iter->rq_xid > req->rq_xid)
3068                                 continue;
3069                 }
3070
3071                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3072                 return;
3073         }
3074
3075         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3076 }
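
/*
 * Illustrative example (not part of the original source): the replay list is
 * kept in ascending (transno, xid) order.  With existing entries
 * (transno 5, xid 10) and (transno 6, xid 4), a new request with
 * (transno 5, xid 12) is inserted between them by the reverse walk above:
 * (6, 4) is skipped because its transno is larger, the walk stops at
 * (5, 10) because its xid is smaller, and list_add() places the new entry
 * right after it, giving (5, 10), (5, 12), (6, 4).
 */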
3077
3078 /**
3079  * Send request and wait until it completes.
3080  * Returns request processing status.
3081  */
3082 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3083 {
3084         struct ptlrpc_request_set *set;
3085         int rc;
3086
3087         ENTRY;
3088         LASSERT(req->rq_set == NULL);
3089         LASSERT(!req->rq_receiving_reply);
3090
3091         set = ptlrpc_prep_set();
3092         if (!set) {
3093                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3094                 RETURN(-ENOMEM);
3095         }
3096
3097         /* for distributed debugging */
3098         lustre_msg_set_status(req->rq_reqmsg, current->pid);
3099
3100         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3101         ptlrpc_request_addref(req);
3102         ptlrpc_set_add_req(set, req);
3103         rc = ptlrpc_set_wait(NULL, set);
3104         ptlrpc_set_destroy(set);
3105
3106         RETURN(rc);
3107 }
3108 EXPORT_SYMBOL(ptlrpc_queue_wait);
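
/*
 * Illustrative sketch (not part of the original source): the typical
 * synchronous calling pattern around ptlrpc_queue_wait().  How the request
 * is allocated and packed depends on the caller; prepare_my_request() below
 * is a hypothetical placeholder (e.g. built via ptlrpc_request_alloc_pack()).
 *
 *	struct ptlrpc_request *req;
 *	int rc;
 *
 *	req = prepare_my_request(imp);
 *	if (req == NULL)
 *		return -ENOMEM;
 *
 *	rc = ptlrpc_queue_wait(req);	// send and wait; returns the request
 *					// processing status
 *	ptlrpc_req_finished(req);	// drop the caller's reference
 *	return rc;
 */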
3109
3110 /**
3111  * Callback used for reply processing of replayed requests.
3112  * In case of a successful reply, calls the registered request replay callback.
3113  * In case of an error, restarts the replay process.
3114  */
3115 static int ptlrpc_replay_interpret(const struct lu_env *env,
3116                                    struct ptlrpc_request *req,
3117                                    void *args, int rc)
3118 {
3119         struct ptlrpc_replay_async_args *aa = args;
3120         struct obd_import *imp = req->rq_import;
3121
3122         ENTRY;
3123         atomic_dec(&imp->imp_replay_inflight);
3124
3125         /*
3126          * Note: if this is a bulk replay (MDS-MDS replay), then even if
3127          * the server got the request but the bulk transfer timed out,
3128          * replay the bulk request again.
3129          */
3130         if (!ptlrpc_client_replied(req) ||
3131             (req->rq_bulk &&
3132              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3133                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3134                 GOTO(out, rc = -ETIMEDOUT);
3135         }
3136
3137         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3138             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3139             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3140                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3141
3142         /** VBR: check version failure */
3143         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3144                 /** replay failed due to version mismatch */
3145                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3146                 spin_lock(&imp->imp_lock);
3147                 imp->imp_vbr_failed = 1;
3148                 spin_unlock(&imp->imp_lock);
3149                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3150         } else {
3151                 /** The transno had better not change over replay. */
3152                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3153                          lustre_msg_get_transno(req->rq_repmsg) ||
3154                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3155                          "%#llx/%#llx\n",
3156                          lustre_msg_get_transno(req->rq_reqmsg),
3157                          lustre_msg_get_transno(req->rq_repmsg));
3158         }
3159
3160         spin_lock(&imp->imp_lock);
3161         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3162         spin_unlock(&imp->imp_lock);
3163         LASSERT(imp->imp_last_replay_transno);
3164
3165         /* transaction number shouldn't be bigger than the latest replayed */
3166         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3167                 DEBUG_REQ(D_ERROR, req,
3168                           "Reported transno=%llu is bigger than replayed=%llu",
3169                           req->rq_transno,
3170                           lustre_msg_get_transno(req->rq_reqmsg));
3171                 GOTO(out, rc = -EINVAL);
3172         }
3173
3174         DEBUG_REQ(D_HA, req, "got reply");
3175
3176         /* let the callback do fixups, possibly including in the request */
3177         if (req->rq_replay_cb)
3178                 req->rq_replay_cb(req);
3179
3180         if (ptlrpc_client_replied(req) &&
3181             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3182                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3183                           lustre_msg_get_status(req->rq_repmsg),
3184                           aa->praa_old_status);
3185
3186                 /*
3187                  * Note: If the replay fails for MDT-MDT recovery, let's
3188                  * abort all of the following requests in the replay
3189                  * and sending list, because MDT-MDT update requests
3190                  * are dependent on each other, see LU-7039
3191                  */
3192                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3193                         struct ptlrpc_request *free_req;
3194                         struct ptlrpc_request *tmp;
3195
3196                         spin_lock(&imp->imp_lock);
3197                         list_for_each_entry_safe(free_req, tmp,
3198                                                  &imp->imp_replay_list,
3199                                                  rq_replay_list) {
3200                                 ptlrpc_free_request(free_req);
3201                         }
3202
3203                         list_for_each_entry_safe(free_req, tmp,
3204                                                  &imp->imp_committed_list,
3205                                                  rq_replay_list) {
3206                                 ptlrpc_free_request(free_req);
3207                         }
3208
3209                         list_for_each_entry_safe(free_req, tmp,
3210                                                  &imp->imp_delayed_list,
3211                                                  rq_list) {
3212                                 spin_lock(&free_req->rq_lock);
3213                                 free_req->rq_err = 1;
3214                                 free_req->rq_status = -EIO;
3215                                 ptlrpc_client_wake_req(free_req);
3216                                 spin_unlock(&free_req->rq_lock);
3217                         }
3218
3219                         list_for_each_entry_safe(free_req, tmp,
3220                                                  &imp->imp_sending_list,
3221                                                  rq_list) {
3222                                 spin_lock(&free_req->rq_lock);
3223                                 free_req->rq_err = 1;
3224                                 free_req->rq_status = -EIO;
3225                                 ptlrpc_client_wake_req(free_req);
3226                                 spin_unlock(&free_req->rq_lock);
3227                         }
3228                         spin_unlock(&imp->imp_lock);
3229                 }
3230         } else {
3231                 /* Put it back for re-replay. */
3232                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3233         }
3234
3235         /*
3236          * Errors during replay can set transno to 0, but
3237          * imp_last_replay_transno shouldn't be set to 0 anyway
3238          */
3239         if (req->rq_transno == 0)
3240                 CERROR("Transno is 0 during replay!\n");
3241
3242         /* continue with recovery */
3243         rc = ptlrpc_import_recovery_state_machine(imp);
3244  out:
3245         req->rq_send_state = aa->praa_old_state;
3246
3247         if (rc != 0)
3248                 /* this replay failed, so restart recovery */
3249                 ptlrpc_connect_import(imp);
3250
3251         RETURN(rc);
3252 }
3253
3254 /**
3255  * Prepares and queues request for replay.
3256  * Adds it to ptlrpcd queue for actual sending.
3257  * Returns 0 on success.
3258  */
3259 int ptlrpc_replay_req(struct ptlrpc_request *req)
3260 {
3261         struct ptlrpc_replay_async_args *aa;
3262
3263         ENTRY;
3264
3265         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3266
3267         aa = ptlrpc_req_async_args(aa, req);
3268         memset(aa, 0, sizeof(*aa));
3269
3270         /* Prepare request to be resent with ptlrpcd */
3271         aa->praa_old_state = req->rq_send_state;
3272         req->rq_send_state = LUSTRE_IMP_REPLAY;
3273         req->rq_phase = RQ_PHASE_NEW;
3274         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3275         if (req->rq_repmsg)
3276                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3277         req->rq_status = 0;
3278         req->rq_interpret_reply = ptlrpc_replay_interpret;
3279         /* Readjust the timeout for current conditions */
3280         ptlrpc_at_set_req_timeout(req);
3281
3282         /* Tell server the net_latency, so it can estimate how long to wait for reply. */
3283         lustre_msg_set_service_timeout(req->rq_reqmsg,
3284                                        ptlrpc_at_get_net_latency(req));
3285         DEBUG_REQ(D_HA, req, "REPLAY");
3286
3287         atomic_inc(&req->rq_import->imp_replay_inflight);
3288         spin_lock(&req->rq_lock);
3289         req->rq_early_free_repbuf = 0;
3290         spin_unlock(&req->rq_lock);
3291         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3292
3293         ptlrpcd_add_req(req);
3294         RETURN(0);
3295 }
3296
3297 /**
3298  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
3299  */
3300 void ptlrpc_abort_inflight(struct obd_import *imp)
3301 {
3302         struct ptlrpc_request *req;
3303         ENTRY;
3304
3305         /*
3306          * Make sure that no new requests get processed for this import.
3307          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3308          * this flag and then putting requests on sending_list or delayed_list.
3309          */
3310         assert_spin_locked(&imp->imp_lock);
3311
3312         /*
3313          * XXX locking?  Maybe we should remove each request with the list
3314          * locked?  Also, how do we know if the requests on the list are
3315          * being freed at this time?
3316          */
3317         list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
3318                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3319
3320                 spin_lock(&req->rq_lock);
3321                 if (req->rq_import_generation < imp->imp_generation) {
3322                         req->rq_err = 1;
3323                         req->rq_status = -EIO;
3324                         ptlrpc_client_wake_req(req);
3325                 }
3326                 spin_unlock(&req->rq_lock);
3327         }
3328
3329         list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
3330                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3331
3332                 spin_lock(&req->rq_lock);
3333                 if (req->rq_import_generation < imp->imp_generation) {
3334                         req->rq_err = 1;
3335                         req->rq_status = -EIO;
3336                         ptlrpc_client_wake_req(req);
3337                 }
3338                 spin_unlock(&req->rq_lock);
3339         }
3340
3341         /*
3342          * Last chance to free reqs left on the replay list, but we
3343          * will still leak reqs that haven't committed.
3344          */
3345         if (imp->imp_replayable)
3346                 ptlrpc_free_committed(imp);
3347
3348         EXIT;
3349 }
3350
3351 /**
3352  * Abort all uncompleted requests in request set \a set
3353  */
3354 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3355 {
3356         struct ptlrpc_request *req;
3357
3358         LASSERT(set != NULL);
3359
3360         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
3361                 spin_lock(&req->rq_lock);
3362                 if (req->rq_phase != RQ_PHASE_RPC) {
3363                         spin_unlock(&req->rq_lock);
3364                         continue;
3365                 }
3366
3367                 req->rq_err = 1;
3368                 req->rq_status = -EINTR;
3369                 ptlrpc_client_wake_req(req);
3370                 spin_unlock(&req->rq_lock);
3371         }
3372 }
3373
3374 /**
3375  * Initialize the XID for the node.  This is common among all requests on
3376  * this node, and only requires the property that it is monotonically
3377  * increasing.  It does not need to be sequential.  Since this is also used
3378  * as the RDMA match bits, it is important that a single client NOT have
3379  * the same match bits for two different in-flight requests, hence we do
3380  * NOT want to have an XID per target or similar.
3381  *
3382  * To avoid an unlikely collision between match bits after a client reboot
3383  * (which would deliver old data into the wrong RDMA buffer) initialize
3384  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3385  * If the time is clearly incorrect, we instead use a 62-bit random number.
3386  * In the worst case the random number will overflow 1M RPCs per second in
3387  * 9133 years, or permutations thereof.
3388  */
3389 #define YEAR_2004 (1ULL << 30)
3390 void ptlrpc_init_xid(void)
3391 {
3392         time64_t now = ktime_get_real_seconds();
3393         u64 xid;
3394
3395         if (now < YEAR_2004) {
3396                 get_random_bytes(&xid, sizeof(xid));
3397                 xid >>= 2;
3398                 xid |= (1ULL << 61);
3399         } else {
3400                 xid = (u64)now << 20;
3401         }
3402
3403         /* Needs to always be aligned to a power-of-two for multi-bulk BRW */
3404         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3405                      0);
3406         xid &= PTLRPC_BULK_OPS_MASK;
3407         atomic64_set(&ptlrpc_last_xid, xid);
3408 }
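
/*
 * Worked example (not part of the original source, illustration only):
 * with ktime_get_real_seconds() returning roughly 1.7e9 (year 2023), the
 * time-based branch yields an initial xid of about 1.7e9 << 20 ~= 1.8e15
 * (between 2^50 and 2^51).  The random fallback, with bit 61 forced on
 * after the shift, always lands in the [2^61, 2^62) range, so the two
 * schemes produce clearly separated starting values.
 */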
3409
3410 /**
3411  * Increase xid and returns resulting new value to the caller.
3412  *
3413  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
3414  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
3415  * itself uses the last bulk xid needed, so the server can determine
3416  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3417  * xid must align to a power-of-two value.
3418  *
3419  * This is assumed to be true due to the initial ptlrpc_last_xid
3420  * value also being initialized to a power-of-two value. LU-1431
3421  */
3422 __u64 ptlrpc_next_xid(void)
3423 {
3424         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3425 }
3426
3427 /**
3428  * If the request has a newly allocated XID (new request or EINPROGRESS
3429  * resend), use this XID as the bulk matchbits; otherwise allocate new
3430  * matchbits for the request to ensure any previous bulk fails, avoiding
3431  * problems with lost replies and therefore several transfers landing in
3432  * the same buffer from different sending attempts.
3433  * This also prevents a previous reply from landing in a different sending attempt.
3434  */
3435 void ptlrpc_set_mbits(struct ptlrpc_request *req)
3436 {
3437         int md_count = req->rq_bulk ? req->rq_bulk->bd_md_count : 1;
3438
3439         /*
3440          * Generate new matchbits for all resend requests, including
3441          * resend replay.
3442          */
3443         if (req->rq_resend) {
3444                 __u64 old_mbits = req->rq_mbits;
3445
3446                 /*
3447                  * The first resend on -EINPROGRESS will generate a new xid,
3448                  * so we could actually use rq_xid as rq_mbits in that case;
3449                  * however, it's a bit hard to distinguish such a resend from
3450                  * a 'resend of the -EINPROGRESS resend'. To keep it simple,
3451                  * we opt to generate new mbits for all resend cases.
3452                  */
3453                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3454                                  BULK_MBITS)) {
3455                         req->rq_mbits = ptlrpc_next_xid();
3456                 } else {
3457                         /*
3458                          * Old versions transfer rq_xid to the peer as
3459                          * matchbits.
3460                          */
3461                         spin_lock(&req->rq_import->imp_lock);
3462                         list_del_init(&req->rq_unreplied_list);
3463                         ptlrpc_assign_next_xid_nolock(req);
3464                         spin_unlock(&req->rq_import->imp_lock);
3465                         req->rq_mbits = req->rq_xid;
3466                 }
3467                 CDEBUG(D_HA, "resend with new mbits old x%llu new x%llu\n",
3468                        old_mbits, req->rq_mbits);
3469         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3470                 /* Request being sent first time, use xid as matchbits. */
3471                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3472                                  BULK_MBITS) ||
3473                     req->rq_mbits == 0) {
3474                         req->rq_mbits = req->rq_xid;
3475                 } else {
3476                         req->rq_mbits -= md_count - 1;
3477                 }
3478         } else {
3479                 /*
3480                  * Replay request, xid and matchbits have already been
3481                  * correctly assigned.
3482                  */
3483                 return;
3484         }
3485
3486         /*
3487          * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
3488          * that server can infer the number of bulks that were prepared,
3489          * see LU-1431
3490          */
3491         req->rq_mbits += md_count - 1;
3492
3493         /*
3494          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3495          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3496          *
3497          * It's ok to directly set the rq_xid here, since this xid bump
3498          * won't affect the request position in unreplied list.
3499          */
3500         if (!OCD_HAS_FLAG(&req->rq_import->imp_connect_data, BULK_MBITS))
3501                 req->rq_xid = req->rq_mbits;
3502 }
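
/*
 * Worked example (not part of the original source, illustration only):
 * a first-time BRW request with rq_xid = X (a multiple of
 * PTLRPC_BULK_OPS_COUNT) and bd_md_count = 4, sent to a server that
 * supports OBD_CONNECT_BULK_MBITS, ends up with rq_mbits = X + 3; the
 * individual bulk transfers use match bits X, X+1, X+2 and X+3, and the
 * server can determine the number of transfers from the low bits (see
 * ptlrpc_next_xid() above).  Against an old server without BULK_MBITS,
 * rq_xid is additionally set to X + 3 so that the xid itself carries the
 * final bulk match bits (LU-6808).
 */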
3503
3504 /**
3505  * Get a glimpse at what next xid value might have been.
3506  * Returns possible next xid.
3507  */
3508 __u64 ptlrpc_sample_next_xid(void)
3509 {
3510         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3511 }
3512 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3513
3514 /**
3515  * Functions for operating ptlrpc workers.
3516  *
3517  * A ptlrpc work is a function which runs inside ptlrpc context.
3518  * The callback shouldn't sleep; otherwise it will block that ptlrpcd thread.
3519  *
3520  * 1. after a work is created, it can be used many times, that is:
3521  *         handler = ptlrpcd_alloc_work();
3522  *         ptlrpcd_queue_work();
3523  *
3524  *    queue it again when necessary:
3525  *         ptlrpcd_queue_work();
3526  *         ptlrpcd_destroy_work();
3527  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3528  *    but the work will only be queued once at any given time. Also, as its
3529  *    name implies, there may be a delay before a ptlrpcd thread actually runs it.
3530  */
3531 struct ptlrpc_work_async_args {
3532         int (*cb)(const struct lu_env *, void *);
3533         void *cbdata;
3534 };
3535
3536 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3537 {
3538         /* re-initialize the req */
3539         req->rq_timeout         = obd_timeout;
3540         req->rq_sent            = ktime_get_real_seconds();
3541         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3542         req->rq_phase           = RQ_PHASE_INTERPRET;
3543         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3544         req->rq_xid             = ptlrpc_next_xid();
3545         req->rq_import_generation = req->rq_import->imp_generation;
3546
3547         ptlrpcd_add_req(req);
3548 }
3549
3550 static int work_interpreter(const struct lu_env *env,
3551                             struct ptlrpc_request *req, void *args, int rc)
3552 {
3553         struct ptlrpc_work_async_args *arg = args;
3554
3555         LASSERT(ptlrpcd_check_work(req));
3556         LASSERT(arg->cb != NULL);
3557
3558         rc = arg->cb(env, arg->cbdata);
3559
3560         list_del_init(&req->rq_set_chain);
3561         req->rq_set = NULL;
3562
3563         if (atomic_dec_return(&req->rq_refcount) > 1) {
3564                 atomic_set(&req->rq_refcount, 2);
3565                 ptlrpcd_add_work_req(req);
3566         }
3567         return rc;
3568 }
3569
3570 static int worker_format;
3571
3572 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3573 {
3574         return req->rq_pill.rc_fmt == (void *)&worker_format;
3575 }
3576
3577 /**
3578  * Create a work for ptlrpc.
3579  */
3580 void *ptlrpcd_alloc_work(struct obd_import *imp,
3581                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3582 {
3583         struct ptlrpc_request *req = NULL;
3584         struct ptlrpc_work_async_args *args;
3585
3586         ENTRY;
3587         might_sleep();
3588
3589         if (!cb)
3590                 RETURN(ERR_PTR(-EINVAL));
3591
3592         /* copy some code from deprecated fakereq. */
3593         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3594         if (!req) {
3595                 CERROR("ptlrpc: run out of memory!\n");
3596                 RETURN(ERR_PTR(-ENOMEM));
3597         }
3598
3599         ptlrpc_cli_req_init(req);
3600
3601         req->rq_send_state = LUSTRE_IMP_FULL;
3602         req->rq_type = PTL_RPC_MSG_REQUEST;
3603         req->rq_import = class_import_get(imp);
3604         req->rq_interpret_reply = work_interpreter;
3605         /* don't want reply */
3606         req->rq_no_delay = req->rq_no_resend = 1;
3607         req->rq_pill.rc_fmt = (void *)&worker_format;
3608
3609         args = ptlrpc_req_async_args(args, req);
3610         args->cb     = cb;
3611         args->cbdata = cbdata;
3612
3613         RETURN(req);
3614 }
3615 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3616
3617 void ptlrpcd_destroy_work(void *handler)
3618 {
3619         struct ptlrpc_request *req = handler;
3620
3621         if (req)
3622                 ptlrpc_req_finished(req);
3623 }
3624 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3625
3626 int ptlrpcd_queue_work(void *handler)
3627 {
3628         struct ptlrpc_request *req = handler;
3629
3630         /*
3631          * Check if the req is already being queued.
3632          *
3633          * Here comes a trick: ptlrpc lacks a reliable way of checking whether
3634          * a req is being processed, so the refcount of the req is used for
3635          * this purpose. This is okay because the caller should treat this
3636          * req as opaque data. - Jinshan
3637          */
3638         LASSERT(atomic_read(&req->rq_refcount) > 0);
3639         if (atomic_inc_return(&req->rq_refcount) == 2)
3640                 ptlrpcd_add_work_req(req);
3641         return 0;
3642 }
3643 EXPORT_SYMBOL(ptlrpcd_queue_work);
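
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * drive the ptlrpc work API above.  The callback my_flush_cb() and its
 * my_data argument are hypothetical placeholders; the callback must not
 * sleep because it runs in ptlrpcd context.
 *
 *	static int my_flush_cb(const struct lu_env *env, void *data)
 *	{
 *		// do a short, non-blocking piece of work
 *		return 0;
 *	}
 *
 *	handler = ptlrpcd_alloc_work(imp, my_flush_cb, my_data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *
 *	ptlrpcd_queue_work(handler);	// queue as often as needed; concurrent
 *					// callers collapse into one queued work
 *	...
 *	ptlrpcd_destroy_work(handler);	// release the work when done
 */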