LU-16314 obdclass: Migrate LASSERTF %p to %px
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 /** Implementation of client-side PortalRPC interfaces */
33
34 #define DEBUG_SUBSYSTEM S_RPC
35
36 #include <linux/delay.h>
37 #include <linux/random.h>
38
39 #include <lnet/lib-lnet.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(desc->bd_vec[i].bv_page);
69 }
70
71 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
72                                        void *frag, int len)
73 {
74         unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
75
76         ENTRY;
77         while (len > 0) {
78                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
79                                      len);
80                 struct page *p;
81
82                 if (!is_vmalloc_addr(frag))
83                         p = virt_to_page((unsigned long)frag);
84                 else
85                         p = vmalloc_to_page(frag);
86                 ptlrpc_prep_bulk_page_nopin(desc, p, offset, page_len);
87                 offset = 0;
88                 len -= page_len;
89                 frag += page_len;
90         }
91
92         RETURN(desc->bd_nob);
93 }
94
95 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
96         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
97         .release_frags  = ptlrpc_release_bulk_page_pin,
98 };
99 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
100
101 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
102         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
103         .release_frags  = ptlrpc_release_bulk_noop,
104         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
105 };
106 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
107
108 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
109 static int ptlrpcd_check_work(struct ptlrpc_request *req);
110 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
111
112 /**
113  * Initialize the passed-in client structure \a cl.
114  */
115 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
116                         struct ptlrpc_client *cl)
117 {
118         cl->cli_request_portal = req_portal;
119         cl->cli_reply_portal   = rep_portal;
120         cl->cli_name           = name;
121 }
122 EXPORT_SYMBOL(ptlrpc_init_client);
123
124 /**
125  * Return PortalRPC connection for remote uuid \a uuid
126  */
127 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
128                                                     u32 refnet)
129 {
130         struct ptlrpc_connection *c;
131         struct lnet_nid self;
132         struct lnet_processid peer;
133         int err;
134
135         /*
136          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
137          * before accessing its values.
138          */
139         err = ptlrpc_uuid_to_peer(uuid, &peer, &self, refnet);
140         if (err != 0) {
141                 CNETERR("cannot find peer %s!\n", uuid->uuid);
142                 return NULL;
143         }
144
145         c = ptlrpc_connection_get(&peer, &self, uuid);
146         if (c) {
147                 memcpy(c->c_remote_uuid.uuid,
148                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
149         }
150
151         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
152
153         return c;
154 }
155
156 /**
157  * Allocate and initialize new bulk descriptor on the sender.
158  * Returns pointer to the descriptor or NULL on error.
159  */
160 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
161                                          unsigned int max_brw,
162                                          enum ptlrpc_bulk_op_type type,
163                                          unsigned int portal,
164                                          const struct ptlrpc_bulk_frag_ops *ops)
165 {
166         struct ptlrpc_bulk_desc *desc;
167         int i;
168
169         LASSERT(ops->add_kiov_frag != NULL);
170
171         if (max_brw > PTLRPC_BULK_OPS_COUNT)
172                 RETURN(NULL);
173
174         if (nfrags > LNET_MAX_IOV * max_brw)
175                 RETURN(NULL);
176
177         OBD_ALLOC_PTR(desc);
178         if (!desc)
179                 return NULL;
180
181         OBD_ALLOC_LARGE(desc->bd_vec,
182                         nfrags * sizeof(*desc->bd_vec));
183         if (!desc->bd_vec)
184                 goto out;
185
186         spin_lock_init(&desc->bd_lock);
187         init_waitqueue_head(&desc->bd_waitq);
188         desc->bd_max_iov = nfrags;
189         desc->bd_iov_count = 0;
190         desc->bd_portal = portal;
191         desc->bd_type = type;
192         desc->bd_md_count = 0;
193         desc->bd_nob_last = LNET_MTU;
194         desc->bd_frag_ops = ops;
195         LASSERT(max_brw > 0);
196         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
197         /*
198          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
199          * node. Negotiated ocd_brw_size will always be <= this number.
200          */
201         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
202                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
203
204         return desc;
205 out:
206         OBD_FREE_PTR(desc);
207         return NULL;
208 }
209
210 /**
211  * Prepare a bulk descriptor for the specified outgoing request \a req that
212  * can fit \a nfrags pages. \a type is the bulk type and \a portal is where
213  * the bulk is to be sent. Used on the client side.
214  * Returns pointer to the newly allocated, initialized bulk descriptor or
215  * NULL on error. See the usage sketch after ptlrpc_prep_bulk_imp() below.
216  */
217 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
218                                               unsigned int nfrags,
219                                               unsigned int max_brw,
220                                               unsigned int type,
221                                               unsigned int portal,
222                                               const struct ptlrpc_bulk_frag_ops
223                                                 *ops)
224 {
225         struct obd_import *imp = req->rq_import;
226         struct ptlrpc_bulk_desc *desc;
227
228         ENTRY;
229         LASSERT(ptlrpc_is_bulk_op_passive(type));
230
231         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
232         if (!desc)
233                 RETURN(NULL);
234
235         desc->bd_import = class_import_get(imp);
236         desc->bd_req = req;
237
238         desc->bd_cbid.cbid_fn  = client_bulk_callback;
239         desc->bd_cbid.cbid_arg = desc;
240
241         /* This makes req own desc, and frees it when req itself is freed */
242         req->rq_bulk = desc;
243
244         return desc;
245 }
246 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
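
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * client-side caller typically builds a bulk descriptor for a read-like
 * RPC - allocate the descriptor against the request, then attach each data
 * page through the frag ops.  The PTLRPC_BULK_PUT_SINK type and
 * OST_BULK_PORTAL mirror what the OSC read path is expected to use; the
 * function name and error handling here are hypothetical and abbreviated.
 */
static inline int example_attach_read_bulk(struct ptlrpc_request *req,
                                           struct page **pages,
                                           unsigned int npages)
{
        struct ptlrpc_bulk_desc *desc;
        unsigned int i;

        desc = ptlrpc_prep_bulk_imp(req, npages, 1, PTLRPC_BULK_PUT_SINK,
                                    OST_BULK_PORTAL,
                                    &ptlrpc_bulk_kiov_pin_ops);
        if (!desc)
                return -ENOMEM;

        /* Each page becomes one kiov fragment of the transfer. */
        for (i = 0; i < npages; i++)
                desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);

        /* req now owns desc (req->rq_bulk) and frees it with itself. */
        return 0;
}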
247
248 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
249                              struct page *page, int pageoffset, int len,
250                              int pin)
251 {
252         struct bio_vec *kiov;
253
254         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
255         LASSERT(page != NULL);
256         LASSERT(pageoffset >= 0);
257         LASSERT(len > 0);
258         LASSERT(pageoffset + len <= PAGE_SIZE);
259
260         kiov = &desc->bd_vec[desc->bd_iov_count];
261
262         if (((desc->bd_iov_count % LNET_MAX_IOV) == 0) ||
263              ((desc->bd_nob_last + len) > LNET_MTU)) {
264                 desc->bd_mds_off[desc->bd_md_count] = desc->bd_iov_count;
265                 desc->bd_md_count++;
266                 desc->bd_nob_last = 0;
267                 LASSERT(desc->bd_md_count <= PTLRPC_BULK_OPS_COUNT);
268         }
269
270         desc->bd_nob_last += len;
271         desc->bd_nob += len;
272
273         if (pin)
274                 get_page(page);
275
276         kiov->bv_page = page;
277         kiov->bv_offset = pageoffset;
278         kiov->bv_len = len;
279
280         desc->bd_iov_count++;
281 }
282 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
283
284 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
285 {
286         ENTRY;
287
288         LASSERT(desc != NULL);
289         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
290         LASSERT(desc->bd_refs == 0);         /* network hands off */
291         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
292         LASSERT(desc->bd_frag_ops != NULL);
293
294         sptlrpc_enc_pool_put_pages(desc);
295
296         if (desc->bd_export)
297                 class_export_put(desc->bd_export);
298         else
299                 class_import_put(desc->bd_import);
300
301         if (desc->bd_frag_ops->release_frags != NULL)
302                 desc->bd_frag_ops->release_frags(desc);
303
304         OBD_FREE_LARGE(desc->bd_vec,
305                        desc->bd_max_iov * sizeof(*desc->bd_vec));
306         OBD_FREE_PTR(desc);
307         EXIT;
308 }
309 EXPORT_SYMBOL(ptlrpc_free_bulk);
310
311 /**
312  * Set the server time limit for this req, i.e. how long we are willing to
313  * wait for the reply before timing out this request.
314  */
315 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
316 {
317         struct obd_device *obd;
318
319         LASSERT(req->rq_import);
320         obd = req->rq_import->imp_obd;
321
322         if (obd_at_off(obd)) {
323                 /* non-AT settings */
324                 /**
325                  * \a imp_server_timeout means this is a reverse import and
326                  * we send (currently only) ASTs to the client; we cannot afford
327                  * to wait too long for the reply, otherwise the other client
328                  * (on whose behalf we are sending this request) would
329                  * time out waiting for us.
330                  */
331                 req->rq_timeout = req->rq_import->imp_server_timeout ?
332                                   obd_timeout / 2 : obd_timeout;
333         } else {
334                 struct imp_at *at = &req->rq_import->imp_at;
335                 timeout_t serv_est;
336                 int idx;
337
338                 idx = import_at_get_index(req->rq_import,
339                                           req->rq_request_portal);
340                 serv_est = obd_at_get(obd, &at->iat_service_estimate[idx]);
341                 /*
342                  * Currently a 32 bit value is sent over the
343                  * wire for rq_timeout so please don't change this
344                  * to time64_t. The work for LU-1158 will in time
345                  * replace rq_timeout with a 64 bit nanosecond value
346                  */
347                 req->rq_timeout = at_est2timeout(serv_est);
348         }
349         /*
350          * We could get even fancier here, using history to predict increased
351          * loading...
352          *
353          * Let the server know what this RPC timeout is by putting it in the
354          * reqmsg
355          */
356         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
357 }
358 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
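
/*
 * Editor's note (illustrative, values assumed): with adaptive timeouts off
 * and obd_timeout = 100s, the code above picks rq_timeout = 100s for a
 * regular import, or 50s for a reverse import (imp_server_timeout set).
 * With AT on and a service estimate of e.g. 20s, rq_timeout becomes
 * at_est2timeout(20), i.e. the estimate plus a margin (roughly 125% + 5s,
 * as noted further below in this file).
 */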
359
360 /* Adjust max service estimate based on server value */
361 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
362                                   timeout_t serv_est)
363 {
364         int idx;
365         timeout_t oldse;
366         struct imp_at *at;
367         struct obd_device *obd;
368
369         LASSERT(req->rq_import);
370         obd = req->rq_import->imp_obd;
371         at = &req->rq_import->imp_at;
372
373         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
374         /*
375          * max service estimates are tracked on the server side,
376          * so just keep minimal history here
377          */
378         oldse = obd_at_measure(obd, &at->iat_service_estimate[idx], serv_est);
379         if (oldse != 0) {
380                 unsigned int at_est = obd_at_get(obd,
381                                                 &at->iat_service_estimate[idx]);
382                 CDEBUG(D_ADAPTTO,
383                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
384                        req->rq_import->imp_obd->obd_name,
385                        req->rq_request_portal,
386                        oldse, at_est);
387         }
388 }
389
390 /**
391  * Returns the expected network latency for the remote node (secs).
392  *
393  * \param[in] req       ptlrpc request
394  *
395  * \retval      0 if AT(Adaptive Timeout) is off
396  * \retval      >0 (iat_net_latency) latency per node
397  */
398 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
399 {
400         struct obd_device *obd = req->rq_import->imp_obd;
401
402         return obd_at_off(obd) ?
403                0 : obd_at_get(obd, &req->rq_import->imp_at.iat_net_latency);
404 }
405
406 /* Adjust expected network latency */
407 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
408                                timeout_t service_timeout)
409 {
410         time64_t now = ktime_get_real_seconds();
411         struct imp_at *at;
412         timeout_t oldnl;
413         timeout_t nl;
414         struct obd_device *obd;
415
416         LASSERT(req->rq_import);
417         obd = req->rq_import->imp_obd;
418
419         if (service_timeout > now - req->rq_sent + 3) {
420                 /*
421                  * b=16408. However, this can also happen if the early reply
422                  * is lost and the client RPC has expired and been resent: the
423                  * early reply or the reply of the original RPC can still fit
424                  * in the reply buffer of the resent RPC. The client is now
425                  * measuring time from the resend time, but the server sent
426                  * back the service time of the original RPC.
427                  */
428                 CDEBUG_LIMIT((lustre_msg_get_flags(req->rq_reqmsg) &
429                               MSG_RESENT) ?  D_ADAPTTO : D_WARNING,
430                              "Reported service time %u > total measured time %lld\n",
431                              service_timeout, now - req->rq_sent);
432                 return;
433         }
434
435         /* Network latency is total time less server processing time,
436          * plus 1 sec to allow for service time rounding
437          */
438         nl = max_t(timeout_t, now - req->rq_sent - service_timeout, 0) + 1;
439         at = &req->rq_import->imp_at;
440
441         oldnl = obd_at_measure(obd, &at->iat_net_latency, nl);
442         if (oldnl != 0) {
443                 timeout_t timeout = obd_at_get(obd, &at->iat_net_latency);
444
445                 CDEBUG(D_ADAPTTO,
446                        "The network latency for %s (nid %s) has changed from %d to %d\n",
447                        req->rq_import->imp_obd->obd_name,
448                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
449                        oldnl, timeout);
450         }
451 }
452
453 static int unpack_reply(struct ptlrpc_request *req)
454 {
455         int rc;
456
457         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
458                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
459                 if (rc) {
460                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
461                                   rc);
462                         return -EPROTO;
463                 }
464         }
465
466         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
467         if (rc) {
468                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
469                           rc);
470                 return -EPROTO;
471         }
472         return 0;
473 }
474
475 /**
476  * Handle an early reply message, called with the rq_lock held.
477  * If anything goes wrong just ignore it - same as if it never happened
478  */
479 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
480 __must_hold(&req->rq_lock)
481 {
482         struct ptlrpc_request *early_req;
483         timeout_t service_timeout;
484         time64_t olddl;
485         int rc;
486
487         ENTRY;
488         req->rq_early = 0;
489         spin_unlock(&req->rq_lock);
490
491         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
492         if (rc) {
493                 spin_lock(&req->rq_lock);
494                 RETURN(rc);
495         }
496
497         rc = unpack_reply(early_req);
498         if (rc != 0) {
499                 sptlrpc_cli_finish_early_reply(early_req);
500                 spin_lock(&req->rq_lock);
501                 RETURN(rc);
502         }
503
504         /*
505          * Use the new timeout value only to adjust the local value for this
506          * request; don't include it in at_history. It is not yet clear why the
507          * service time increased and whether it should be counted or skipped,
508          * e.g. it could be a recovery case or some error on the server; the
509          * real reply will add all new data if it is worth adding.
510          */
511         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
512         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
513
514         /* Network latency can be adjusted, it is pure network delays */
515         service_timeout = lustre_msg_get_service_timeout(early_req->rq_repmsg);
516         ptlrpc_at_adj_net_latency(req, service_timeout);
517
518         sptlrpc_cli_finish_early_reply(early_req);
519
520         spin_lock(&req->rq_lock);
521         olddl = req->rq_deadline;
522         /*
523          * The server assumes it now has rq_timeout from when the request
524          * arrived, so the client should give it at least that long.
525          * Since we don't know the arrival time, we'll use the original
526          * sent time.
527          */
528         req->rq_deadline = req->rq_sent + req->rq_timeout +
529                            ptlrpc_at_get_net_latency(req);
530
531         /* The below message is checked in replay-single.sh test_65{a,b} */
532         /* The below message is checked in sanity-{gss,krb5} test_8 */
533         DEBUG_REQ(D_ADAPTTO, req,
534                   "Early reply #%d, new deadline in %llds (%llds)",
535                   req->rq_early_count,
536                   req->rq_deadline - ktime_get_real_seconds(),
537                   req->rq_deadline - olddl);
538
539         RETURN(rc);
540 }
541
542 static struct kmem_cache *request_cache;
543
544 int ptlrpc_request_cache_init(void)
545 {
546         request_cache = kmem_cache_create("ptlrpc_cache",
547                                           sizeof(struct ptlrpc_request),
548                                           0, SLAB_HWCACHE_ALIGN, NULL);
549         return request_cache ? 0 : -ENOMEM;
550 }
551
552 void ptlrpc_request_cache_fini(void)
553 {
554         kmem_cache_destroy(request_cache);
555 }
556
557 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
558 {
559         struct ptlrpc_request *req;
560
561         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
562         return req;
563 }
564
565 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
566 {
567         OBD_SLAB_FREE_PTR(req, request_cache);
568 }
569
570 /**
571  * Wind down request pool \a pool.
572  * Frees all requests from the pool as well.
573  */
574 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
575 {
576         struct ptlrpc_request *req;
577
578         LASSERT(pool != NULL);
579
580         spin_lock(&pool->prp_lock);
581         while ((req = list_first_entry_or_null(&pool->prp_req_list,
582                                                struct ptlrpc_request,
583                                                rq_list))) {
584                 list_del(&req->rq_list);
585                 LASSERT(req->rq_reqbuf);
586                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
587                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
588                 ptlrpc_request_cache_free(req);
589         }
590         spin_unlock(&pool->prp_lock);
591         OBD_FREE(pool, sizeof(*pool));
592 }
593 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
594
595 /**
596  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
597  */
598 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
599 {
600         int i;
601         int size = 1;
602
603         while (size < pool->prp_rq_size)
604                 size <<= 1;
605
606         LASSERTF(list_empty(&pool->prp_req_list) ||
607                  size == pool->prp_rq_size,
608                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
609                  pool->prp_rq_size, size);
610
611         pool->prp_rq_size = size;
612         for (i = 0; i < num_rq; i++) {
613                 struct ptlrpc_request *req;
614                 struct lustre_msg *msg;
615
616                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
617                 if (!req)
618                         return i;
619                 OBD_ALLOC_LARGE(msg, size);
620                 if (!msg) {
621                         ptlrpc_request_cache_free(req);
622                         return i;
623                 }
624                 req->rq_reqbuf = msg;
625                 req->rq_reqbuf_len = size;
626                 req->rq_pool = pool;
627                 spin_lock(&pool->prp_lock);
628                 list_add_tail(&req->rq_list, &pool->prp_req_list);
629                 spin_unlock(&pool->prp_lock);
630         }
631         return num_rq;
632 }
633 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
634
635 /**
636  * Create and initialize new request pool with given attributes:
637  * \a num_rq - initial number of requests to create for the pool
638  * \a msgsize - maximum message size possible for requests in this pool
639  * \a populate_pool - function to be called when more requests need to be added
640  *                    to the pool
641  * Returns pointer to newly created pool or NULL on error.
642  */
643 struct ptlrpc_request_pool *
644 ptlrpc_init_rq_pool(int num_rq, int msgsize,
645                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
646 {
647         struct ptlrpc_request_pool *pool;
648
649         OBD_ALLOC_PTR(pool);
650         if (!pool)
651                 return NULL;
652
653         /*
654          * Request the next power of two for the allocation, because
655          * internally the kernel does exactly this
656          */
657         spin_lock_init(&pool->prp_lock);
658         INIT_LIST_HEAD(&pool->prp_req_list);
659         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
660         pool->prp_populate = populate_pool;
661
662         populate_pool(pool, num_rq);
663
664         return pool;
665 }
666 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
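
/*
 * Editor's illustrative sketch (not part of the original file): creating a
 * small emergency request pool with the populate callback defined above.
 * The request count and message size are arbitrary examples; real callers
 * size the pool for their worst-case message (e.g. the OSC I/O path).
 */
static inline struct ptlrpc_request_pool *example_make_pool(void)
{
        /* 4 preallocated requests, each able to hold a 16 KiB message */
        return ptlrpc_init_rq_pool(4, 16384, ptlrpc_add_rqs_to_pool);
}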
667
668 /**
669  * Fetches one request from pool \a pool
670  */
671 static struct ptlrpc_request *
672 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
673 {
674         struct ptlrpc_request *request;
675         struct lustre_msg *reqbuf;
676
677         if (!pool)
678                 return NULL;
679
680         spin_lock(&pool->prp_lock);
681
682         /*
683          * See if we have anything in the pool, and bail out if not.
684          * In the writeout path, where this matters, this is safe to do
685          * because nothing is lost in this case; when some in-flight requests
686          * complete, this code will be called again.
687          */
688         if (unlikely(list_empty(&pool->prp_req_list))) {
689                 spin_unlock(&pool->prp_lock);
690                 return NULL;
691         }
692
693         request = list_first_entry(&pool->prp_req_list, struct ptlrpc_request,
694                                    rq_list);
695         list_del_init(&request->rq_list);
696         spin_unlock(&pool->prp_lock);
697
698         LASSERT(request->rq_reqbuf);
699         LASSERT(request->rq_pool);
700
701         reqbuf = request->rq_reqbuf;
702         memset(request, 0, sizeof(*request));
703         request->rq_reqbuf = reqbuf;
704         request->rq_reqbuf_len = pool->prp_rq_size;
705         request->rq_pool = pool;
706
707         return request;
708 }
709
710 /**
711  * Returns the freed \a request to its pool.
712  */
713 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
714 {
715         struct ptlrpc_request_pool *pool = request->rq_pool;
716
717         spin_lock(&pool->prp_lock);
718         LASSERT(list_empty(&request->rq_list));
719         LASSERT(!request->rq_receiving_reply);
720         list_add_tail(&request->rq_list, &pool->prp_req_list);
721         spin_unlock(&pool->prp_lock);
722 }
723
724 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
725 {
726         struct obd_import *imp = req->rq_import;
727         struct ptlrpc_request *iter;
728
729         assert_spin_locked(&imp->imp_lock);
730         LASSERT(list_empty(&req->rq_unreplied_list));
731
732         /* unreplied list is sorted by xid in ascending order */
733         list_for_each_entry_reverse(iter, &imp->imp_unreplied_list,
734                                     rq_unreplied_list) {
735                 LASSERT(req->rq_xid != iter->rq_xid);
736                 if (req->rq_xid < iter->rq_xid)
737                         continue;
738                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
739                 return;
740         }
741         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
742 }
743
744 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
745 {
746         req->rq_xid = ptlrpc_next_xid();
747         ptlrpc_add_unreplied(req);
748 }
749
750 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
751 {
752         spin_lock(&req->rq_import->imp_lock);
753         ptlrpc_assign_next_xid_nolock(req);
754         spin_unlock(&req->rq_import->imp_lock);
755 }
756
757 static atomic64_t ptlrpc_last_xid;
758
759 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
760 {
761         spin_lock(&req->rq_import->imp_lock);
762         list_del_init(&req->rq_unreplied_list);
763         ptlrpc_assign_next_xid_nolock(req);
764         spin_unlock(&req->rq_import->imp_lock);
765         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
766 }
767
768 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
769 {
770         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
771         __u32 opc;
772         __u16 tag;
773
774         opc = lustre_msg_get_opc(req->rq_reqmsg);
775         tag = obd_get_mod_rpc_slot(cli, opc);
776         lustre_msg_set_tag(req->rq_reqmsg, tag);
777         ptlrpc_reassign_next_xid(req);
778 }
779 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
780
781 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
782 {
783         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
784
785         if (tag != 0) {
786                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
787                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
788
789                 obd_put_mod_rpc_slot(cli, opc, tag);
790         }
791 }
792 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
793
794 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
795                              __u32 version, int opcode, char **bufs,
796                              struct ptlrpc_cli_ctx *ctx)
797 {
798         int count;
799         struct obd_import *imp;
800         __u32 *lengths;
801         int rc;
802
803         ENTRY;
804
805         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
806         imp = request->rq_import;
807         lengths = request->rq_pill.rc_area[RCL_CLIENT];
808
809         if (ctx) {
810                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
811         } else {
812                 rc = sptlrpc_req_get_ctx(request);
813                 if (rc)
814                         GOTO(out_free, rc);
815         }
816         sptlrpc_req_set_flavor(request, opcode);
817
818         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
819                                  lengths, bufs);
820         if (rc)
821                 GOTO(out_ctx, rc);
822
823         lustre_msg_add_version(request->rq_reqmsg, version);
824         request->rq_send_state = LUSTRE_IMP_FULL;
825         request->rq_type = PTL_RPC_MSG_REQUEST;
826
827         request->rq_req_cbid.cbid_fn  = request_out_callback;
828         request->rq_req_cbid.cbid_arg = request;
829
830         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
831         request->rq_reply_cbid.cbid_arg = request;
832
833         request->rq_reply_deadline = 0;
834         request->rq_bulk_deadline = 0;
835         request->rq_req_deadline = 0;
836         request->rq_phase = RQ_PHASE_NEW;
837         request->rq_next_phase = RQ_PHASE_UNDEFINED;
838
839         request->rq_request_portal = imp->imp_client->cli_request_portal;
840         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
841
842         ptlrpc_at_set_req_timeout(request);
843
844         lustre_msg_set_opc(request->rq_reqmsg, opcode);
845
846         /* Let's set up the deadline for req/reply/bulk unlink for opcode. */
847         if (cfs_fail_val == opcode) {
848                 time64_t *fail_t = NULL, *fail2_t = NULL;
849
850                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
851                         fail_t = &request->rq_bulk_deadline;
852                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
853                         fail_t = &request->rq_reply_deadline;
854                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
855                         fail_t = &request->rq_req_deadline;
856                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
857                         fail_t = &request->rq_reply_deadline;
858                         fail2_t = &request->rq_bulk_deadline;
859                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
860                         time64_t now = ktime_get_real_seconds();
861                         u64 xid = ((u64)now >> 4) << 24;
862
863                         atomic64_set(&ptlrpc_last_xid, xid);
864                 }
865
866                 if (fail_t) {
867                         *fail_t = ktime_get_real_seconds() +
868                                   PTLRPC_REQ_LONG_UNLINK;
869
870                         if (fail2_t)
871                                 *fail2_t = ktime_get_real_seconds() +
872                                            PTLRPC_REQ_LONG_UNLINK;
873
874                         /*
875                          * The RPC is infected; let the test change the
876                          * fail_loc
877                          */
878                         msleep(4 * MSEC_PER_SEC);
879                 }
880         }
881         ptlrpc_assign_next_xid(request);
882
883         RETURN(0);
884
885 out_ctx:
886         LASSERT(!request->rq_pool);
887         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
888 out_free:
889         atomic_dec(&imp->imp_reqs);
890         class_import_put(imp);
891
892         return rc;
893 }
894 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
895
896 /**
897  * Pack request buffers for network transfer, performing encryption
898  * steps if necessary.
899  */
900 int ptlrpc_request_pack(struct ptlrpc_request *request,
901                         __u32 version, int opcode)
902 {
903         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
904 }
905 EXPORT_SYMBOL(ptlrpc_request_pack);
906
907 /**
908  * Helper function to allocate a new request on import \a imp,
909  * possibly using an existing request from pool \a pool if provided.
910  * Returns allocated request structure with import field filled or
911  * NULL on error.
912  */
913 static inline
914 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
915                                               struct ptlrpc_request_pool *pool)
916 {
917         struct ptlrpc_request *request = NULL;
918
919         request = ptlrpc_request_cache_alloc(GFP_NOFS);
920
921         if (!request && pool)
922                 request = ptlrpc_prep_req_from_pool(pool);
923
924         if (request) {
925                 ptlrpc_cli_req_init(request);
926
927                 LASSERTF((unsigned long)imp > 0x1000, "%px\n", imp);
928                 LASSERT(imp != LP_POISON);
929                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%px\n",
930                          imp->imp_client);
931                 LASSERT(imp->imp_client != LP_POISON);
932
933                 request->rq_import = class_import_get(imp);
934                 atomic_inc(&imp->imp_reqs);
935         } else {
936                 CERROR("request allocation out of memory\n");
937         }
938
939         return request;
940 }
941
942 static int ptlrpc_reconnect_if_idle(struct obd_import *imp)
943 {
944         int rc;
945
946         /*
947          * Initiate the connection if needed when the import has been
948          * referenced by the new request, to avoid races with disconnect.
949          * Serialize this check against the conditional state=IDLE
950          * in ptlrpc_disconnect_idle_interpret().
951          */
952         spin_lock(&imp->imp_lock);
953         if (imp->imp_state == LUSTRE_IMP_IDLE) {
954                 imp->imp_generation++;
955                 imp->imp_initiated_at = imp->imp_generation;
956                 imp->imp_state = LUSTRE_IMP_NEW;
957
958                 /* connect_import_locked releases imp_lock */
959                 rc = ptlrpc_connect_import_locked(imp);
960                 if (rc)
961                         return rc;
962                 ptlrpc_pinger_add_import(imp);
963         } else {
964                 spin_unlock(&imp->imp_lock);
965         }
966         return 0;
967 }
968
969 /**
970  * Helper function for creating a request.
971  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
972  * buffer structures according to capsule template \a format.
973  * Returns allocated request structure pointer or NULL on error.
974  */
975 static struct ptlrpc_request *
976 ptlrpc_request_alloc_internal(struct obd_import *imp,
977                               struct ptlrpc_request_pool *pool,
978                               const struct req_format *format)
979 {
980         struct ptlrpc_request *request;
981
982         request = __ptlrpc_request_alloc(imp, pool);
983         if (!request)
984                 return NULL;
985
986         /* Don't make the expensive check for an idling connection
987          * if it's already connected */
988         if (unlikely(imp->imp_state != LUSTRE_IMP_FULL)) {
989                 if (ptlrpc_reconnect_if_idle(imp) < 0) {
990                         atomic_dec(&imp->imp_reqs);
991                         ptlrpc_request_free(request);
992                         return NULL;
993                 }
994         }
995
996         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
997         req_capsule_set(&request->rq_pill, format);
998         return request;
999 }
1000
1001 /**
1002  * Allocate new request structure for import \a imp and initialize its
1003  * buffer structure according to capsule template \a format.
1004  */
1005 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1006                                             const struct req_format *format)
1007 {
1008         return ptlrpc_request_alloc_internal(imp, NULL, format);
1009 }
1010 EXPORT_SYMBOL(ptlrpc_request_alloc);
1011
1012 /**
1013  * Allocate new request structure for import \a imp from pool \a pool and
1014  * initialize its buffer structure according to capsule template \a format.
1015  */
1016 struct ptlrpc_request *
1017 ptlrpc_request_alloc_pool(struct obd_import *imp,
1018                           struct ptlrpc_request_pool *pool,
1019                           const struct req_format *format)
1020 {
1021         return ptlrpc_request_alloc_internal(imp, pool, format);
1022 }
1023 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1024
1025 /**
1026  * For requests not from pool, free memory of the request structure.
1027  * For requests obtained from a pool earlier, return request back to pool.
1028  */
1029 void ptlrpc_request_free(struct ptlrpc_request *request)
1030 {
1031         if (request->rq_pool)
1032                 __ptlrpc_free_req_to_pool(request);
1033         else
1034                 ptlrpc_request_cache_free(request);
1035 }
1036 EXPORT_SYMBOL(ptlrpc_request_free);
1037
1038 /**
1039  * Allocate a new request for operation \a opcode and immediately pack it for
1040  * network transfer.
1041  * Only used for simple requests like OBD_PING where the only important
1042  * part of the request is the operation itself.
1043  * Returns allocated request or NULL on error.
1044  */
1045 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1046                                                  const struct req_format *format,
1047                                                  __u32 version, int opcode)
1048 {
1049         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1050         int rc;
1051
1052         if (req) {
1053                 rc = ptlrpc_request_pack(req, version, opcode);
1054                 if (rc) {
1055                         ptlrpc_request_free(req);
1056                         req = NULL;
1057                 }
1058         }
1059         return req;
1060 }
1061 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
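
/*
 * Editor's illustrative sketch (not part of the original file): allocating,
 * packing and synchronously sending a simple ping-style request.  The
 * RQF_OBD_PING format, LUSTRE_OBD_VERSION and ptlrpc_queue_wait() are what
 * the pinger path is believed to use; error handling is abbreviated.
 */
static inline int example_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int rc;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (!req)
                return -ENOMEM;

        ptlrpc_request_set_replen(req);     /* size the reply buffer */
        rc = ptlrpc_queue_wait(req);        /* send and wait for the reply */
        ptlrpc_req_finished(req);           /* drop our reference */

        return rc;
}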
1062
1063 /**
1064  * Allocate and initialize new request set structure on the current CPT.
1065  * Returns a pointer to the newly allocated set structure or NULL on error.
1066  */
1067 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1068 {
1069         struct ptlrpc_request_set *set;
1070         int cpt;
1071
1072         ENTRY;
1073         cpt = cfs_cpt_current(cfs_cpt_tab, 0);
1074         OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
1075         if (!set)
1076                 RETURN(NULL);
1077         atomic_set(&set->set_refcount, 1);
1078         INIT_LIST_HEAD(&set->set_requests);
1079         init_waitqueue_head(&set->set_waitq);
1080         atomic_set(&set->set_new_count, 0);
1081         atomic_set(&set->set_remaining, 0);
1082         spin_lock_init(&set->set_new_req_lock);
1083         INIT_LIST_HEAD(&set->set_new_requests);
1084         set->set_max_inflight = UINT_MAX;
1085         set->set_producer     = NULL;
1086         set->set_producer_arg = NULL;
1087         set->set_rc           = 0;
1088
1089         RETURN(set);
1090 }
1091 EXPORT_SYMBOL(ptlrpc_prep_set);
1092
1093 /**
1094  * Allocate and initialize new request set structure with flow control
1095  * extension. This extension makes it possible to control the number of
1096  * requests in flight for the whole set. A callback function to generate
1097  * requests must be provided, and the request set will keep the number of
1098  * requests sent over the wire at or below \a max.
1099  * Returns a pointer to the newly allocated set structure or NULL on error.
1100  */
1101 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1102                                              void *arg)
1103
1104 {
1105         struct ptlrpc_request_set *set;
1106
1107         set = ptlrpc_prep_set();
1108         if (!set)
1109                 RETURN(NULL);
1110
1111         set->set_max_inflight  = max;
1112         set->set_producer      = func;
1113         set->set_producer_arg  = arg;
1114
1115         RETURN(set);
1116 }
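
/*
 * Editor's illustrative sketch (not part of the original file): a producer
 * callback for a flow-controlled set.  The set calls the producer whenever
 * it has room below max_inflight; the producer builds or dequeues one
 * request, adds it to the set and returns, or returns -ENOENT when there is
 * nothing left to send.  Here the "queue" is a single request pointer passed
 * through the opaque arg, purely for brevity.
 */
static inline int example_producer(struct ptlrpc_request_set *set, void *arg)
{
        struct ptlrpc_request **next = arg;     /* queue of one, for brevity */
        struct ptlrpc_request *req = *next;

        if (!req)
                return -ENOENT;                 /* nothing left to send */

        *next = NULL;
        ptlrpc_set_add_req(set, req);           /* set owns the reference now */
        return 0;
}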
1117
1118 /**
1119  * Wind down and free request set structure previously allocated with
1120  * ptlrpc_prep_set.
1121  * Ensures that all requests on the set have completed and removes
1122  * all requests from the request list in a set.
1123  * If any unsent requests happen to be on the list, pretends that they got
1124  * an error in flight and calls their completion handlers.
1125  */
1126 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1127 {
1128         struct ptlrpc_request *req;
1129         int expected_phase;
1130         int n = 0;
1131
1132         ENTRY;
1133
1134         /* Requests on the set should either all be completed, or all be new */
1135         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1136                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1137         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
1138                 LASSERT(req->rq_phase == expected_phase);
1139                 n++;
1140         }
1141
1142         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1143                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1144                  atomic_read(&set->set_remaining), n);
1145
1146         while ((req = list_first_entry_or_null(&set->set_requests,
1147                                                struct ptlrpc_request,
1148                                                rq_set_chain))) {
1149                 list_del_init(&req->rq_set_chain);
1150
1151                 LASSERT(req->rq_phase == expected_phase);
1152
1153                 if (req->rq_phase == RQ_PHASE_NEW) {
1154                         ptlrpc_req_interpret(NULL, req, -EBADR);
1155                         atomic_dec(&set->set_remaining);
1156                 }
1157
1158                 spin_lock(&req->rq_lock);
1159                 req->rq_set = NULL;
1160                 req->rq_invalid_rqset = 0;
1161                 spin_unlock(&req->rq_lock);
1162
1163                 ptlrpc_req_finished(req);
1164         }
1165
1166         LASSERT(atomic_read(&set->set_remaining) == 0);
1167
1168         ptlrpc_reqset_put(set);
1169         EXIT;
1170 }
1171 EXPORT_SYMBOL(ptlrpc_set_destroy);
1172
1173 /**
1174  * Add a new request to the general purpose request set.
1175  * Assumes request reference from the caller.
1176  */
1177 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1178                         struct ptlrpc_request *req)
1179 {
1180         if (set == PTLRPCD_SET) {
1181                 ptlrpcd_add_req(req);
1182                 return;
1183         }
1184
1185         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1186         LASSERT(list_empty(&req->rq_set_chain));
1187
1188         if (req->rq_allow_intr)
1189                 set->set_allow_intr = 1;
1190
1191         /* The set takes over the caller's request reference */
1192         list_add_tail(&req->rq_set_chain, &set->set_requests);
1193         req->rq_set = set;
1194         atomic_inc(&set->set_remaining);
1195         req->rq_queued_time = ktime_get_seconds();
1196
1197         if (req->rq_reqmsg) {
1198                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1199                 lustre_msg_set_uid_gid(req->rq_reqmsg, NULL, NULL);
1200         }
1201
1202         if (set->set_producer)
1203                 /*
1204                  * If the request set has a producer callback, the RPC must be
1205                  * sent straight away
1206                  */
1207                 ptlrpc_send_new_req(req);
1208 }
1209 EXPORT_SYMBOL(ptlrpc_set_add_req);
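
/*
 * Editor's illustrative sketch (not part of the original file): the common
 * lifecycle of a general-purpose request set - prepare the set, add
 * already-packed requests to it, wait for them all, then destroy the set.
 * ptlrpc_set_wait() and its lu_env argument reflect the expected API; treat
 * them and the function name as assumptions.
 */
static inline int example_send_many(const struct lu_env *env,
                                    struct ptlrpc_request **reqs, int count)
{
        struct ptlrpc_request_set *set;
        int rc, i;

        set = ptlrpc_prep_set();
        if (!set)
                return -ENOMEM;

        for (i = 0; i < count; i++)
                ptlrpc_set_add_req(set, reqs[i]);  /* set takes the refs */

        rc = ptlrpc_set_wait(env, set);            /* send all, wait for all */
        ptlrpc_set_destroy(set);                   /* drops request refs */

        return rc;
}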
1210
1211 /**
1212  * Add a request to a request set with a dedicated server thread
1213  * and wake the thread to do any necessary processing.
1214  * Currently only used for ptlrpcd.
1215  */
1216 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1217                             struct ptlrpc_request *req)
1218 {
1219         struct ptlrpc_request_set *set = pc->pc_set;
1220         int count, i;
1221
1222         LASSERT(req->rq_set == NULL);
1223         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1224
1225         spin_lock(&set->set_new_req_lock);
1226         /*
1227          * The set takes over the caller's request reference.
1228          */
1229         req->rq_set = set;
1230         req->rq_queued_time = ktime_get_seconds();
1231         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1232         count = atomic_inc_return(&set->set_new_count);
1233         spin_unlock(&set->set_new_req_lock);
1234
1235         /* Only need to call wakeup once for the first entry. */
1236         if (count == 1) {
1237                 wake_up(&set->set_waitq);
1238
1239                 /*
1240                  * XXX: It may be unnecessary to wake up all the partners. But to
1241                  *      guarantee the async RPC can be processed ASAP, we have
1242                  *      no better choice. It may be fixed in the future.
1243                  */
1244                 for (i = 0; i < pc->pc_npartners; i++)
1245                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1246         }
1247 }
1248
1249 /**
1250  * Based on the current state of the import, determine if the request
1251  * can be sent, is an error, or should be delayed.
1252  *
1253  * Returns true if this request should be delayed. If false, and
1254  * *status is set, then the request cannot be sent and *status is the
1255  * error code.  If false and *status is 0, then the request can be sent.
1256  *
1257  * The imp->imp_lock must be held.
1258  */
1259 static int ptlrpc_import_delay_req(struct obd_import *imp,
1260                                    struct ptlrpc_request *req, int *status)
1261 {
1262         int delay = 0;
1263
1264         ENTRY;
1265         LASSERT(status);
1266         *status = 0;
1267
1268         if (req->rq_ctx_init || req->rq_ctx_fini) {
1269                 /* always allow ctx init/fini RPC to go through */
1270         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1271                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1272                 *status = -EIO;
1273         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1274                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1275
1276                 /*
1277                  * pings or MDS-equivalent STATFS may safely
1278                  * race with umount
1279                  */
1280                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1281                           D_HA : D_ERROR, req, "IMP_CLOSED");
1282                 *status = -EIO;
1283         } else if (ptlrpc_send_limit_expired(req)) {
1284                 /* probably doesn't need to be a D_ERROR after initial testing */
1285                 DEBUG_REQ(D_HA, req, "send limit expired");
1286                 *status = -ETIMEDOUT;
1287         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1288                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1289                 ;/* allow CONNECT even if import is invalid */
1290                 if (atomic_read(&imp->imp_inval_count) != 0) {
1291                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1292                         *status = -EIO;
1293                 }
1294         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1295                 if (!imp->imp_deactive)
1296                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1297                 *status = -ESHUTDOWN; /* b=12940 */
1298         } else if (req->rq_import_generation != imp->imp_generation) {
1299                 DEBUG_REQ(req->rq_no_resend ? D_INFO : D_ERROR,
1300                           req, "req wrong generation:");
1301                 *status = -EIO;
1302         } else if (req->rq_send_state != imp->imp_state) {
1303                 /* invalidate in progress - any requests should be dropped */
1304                 if (atomic_read(&imp->imp_inval_count) != 0) {
1305                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1306                         *status = -EIO;
1307                 } else if (req->rq_no_delay &&
1308                            imp->imp_generation != imp->imp_initiated_at) {
1309                         /* ignore nodelay for requests initiating connections */
1310                         *status = -EAGAIN;
1311                 } else if (req->rq_allow_replay &&
1312                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1313                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1314                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1315                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1316                         DEBUG_REQ(D_HA, req, "allow during recovery");
1317                 } else {
1318                         delay = 1;
1319                 }
1320         }
1321
1322         RETURN(delay);
1323 }
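
/*
 * Editor's illustrative sketch (not part of the original file): how a caller
 * is expected to interpret the (delay, *status) pair returned above while
 * holding imp_lock.  The function name is hypothetical.
 */
static inline int example_may_send(struct obd_import *imp,
                                   struct ptlrpc_request *req)
{
        int status = 0;
        int delay;

        spin_lock(&imp->imp_lock);
        delay = ptlrpc_import_delay_req(imp, req, &status);
        spin_unlock(&imp->imp_lock);

        if (delay)
                return -EAGAIN; /* keep the request queued and retry later */

        return status;          /* 0: safe to send, <0: fail the request */
}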
1324
1325 /**
1326  * Decide if the error message should be printed to the console or not.
1327  * Makes its decision based on request type, status, and failure frequency.
1328  *
1329  * \param[in] req  request that failed and may need a console message
1330  *
1331  * \retval false if no message should be printed
1332  * \retval true  if console message should be printed
1333  */
1334 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1335 {
1336         LASSERT(req->rq_reqmsg != NULL);
1337
1338         /* Suppress particular reconnect errors which are to be expected. */
1339         if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
1340                 /* Suppress timed out reconnect requests */
1341                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1342                     req->rq_timedout)
1343                         return false;
1344
1345                 /*
1346                  * Suppress most unavailable/again reconnect requests, but
1347                  * print occasionally so it is clear the client is trying to
1348                  * connect to a server where no target is running.
1349                  */
1350                 if ((err == -ENODEV || err == -EAGAIN) &&
1351                     req->rq_import->imp_conn_cnt % 30 != 20)
1352                         return false;
1353         }
1354
1355         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1356                 /* -EAGAIN is normal when using POSIX flocks */
1357                 return false;
1358
1359         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1360             (req->rq_xid & 0xf) != 10)
1361                 /* Suppress most ping requests, they may fail occasionally */
1362                 return false;
1363
1364         return true;
1365 }
1366
1367 /**
1368  * Check request processing status.
1369  * Returns the status.
1370  */
1371 static int ptlrpc_check_status(struct ptlrpc_request *req)
1372 {
1373         int rc;
1374
1375         ENTRY;
1376         rc = lustre_msg_get_status(req->rq_repmsg);
1377         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1378                 struct obd_import *imp = req->rq_import;
1379                 struct lnet_nid *nid = &imp->imp_connection->c_peer.nid;
1380                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1381
1382                 if (ptlrpc_console_allow(req, opc, rc))
1383                         LCONSOLE_ERROR_MSG(0x11,
1384                                            "%s: operation %s to node %s failed: rc = %d\n",
1385                                            imp->imp_obd->obd_name,
1386                                            ll_opcode2str(opc),
1387                                            libcfs_nidstr(nid), rc);
1388                 RETURN(rc < 0 ? rc : -EINVAL);
1389         }
1390
1391         if (rc)
1392                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1393
1394         RETURN(rc);
1395 }
1396
1397 /**
1398  * Save pre-versions of objects into the request for replay.
1399  * Versions are obtained from the server reply.
1400  * Used for VBR (version based recovery).
1401  */
1402 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1403 {
1404         struct lustre_msg *repmsg = req->rq_repmsg;
1405         struct lustre_msg *reqmsg = req->rq_reqmsg;
1406         __u64 *versions = lustre_msg_get_versions(repmsg);
1407
1408         ENTRY;
1409         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1410                 return;
1411
1412         LASSERT(versions);
1413         lustre_msg_set_versions(reqmsg, versions);
1414         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1415                versions[0], versions[1]);
1416
1417         EXIT;
1418 }
1419
1420 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1421 {
1422         struct ptlrpc_request *req;
1423
1424         assert_spin_locked(&imp->imp_lock);
1425         if (list_empty(&imp->imp_unreplied_list))
1426                 return 0;
1427
1428         req = list_first_entry(&imp->imp_unreplied_list, struct ptlrpc_request,
1429                                rq_unreplied_list);
1430         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1431
1432         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1433                 imp->imp_known_replied_xid = req->rq_xid - 1;
1434
1435         return req->rq_xid - 1;
1436 }
1437
1438 /**
1439  * Callback function called when client receives RPC reply for \a req.
1440  * Returns 0 on success or error code.
1441  * The return value is assigned to req->rq_status by the caller
1442  * as request processing status.
1443  * This function also decides if the request needs to be saved for later replay.
1444  */
1445 static int after_reply(struct ptlrpc_request *req)
1446 {
1447         struct obd_import *imp = req->rq_import;
1448         struct obd_device *obd = req->rq_import->imp_obd;
1449         ktime_t work_start;
1450         u64 committed;
1451         s64 timediff;
1452         int rc;
1453
1454         ENTRY;
1455         LASSERT(obd != NULL);
1456         /* repbuf must be unlinked */
1457         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1458
1459         if (req->rq_reply_truncated) {
1460                 if (ptlrpc_no_resend(req)) {
1461                         DEBUG_REQ(D_ERROR, req,
1462                                   "reply buffer overflow, expected=%d, actual size=%d",
1463                                   req->rq_nob_received, req->rq_repbuf_len);
1464                         RETURN(-EOVERFLOW);
1465                 }
1466
1467                 sptlrpc_cli_free_repbuf(req);
1468                 /*
1469                  * Pass the required reply buffer size (including
1470                  * space for early reply).
1471                  * NB: no need to round up because alloc_repbuf
1472                  * will round it up
1473                  */
1474                 req->rq_replen = req->rq_nob_received;
1475                 req->rq_nob_received = 0;
1476                 spin_lock(&req->rq_lock);
1477                 req->rq_resend       = 1;
1478                 spin_unlock(&req->rq_lock);
1479                 RETURN(0);
1480         }
1481
1482         work_start = ktime_get_real();
1483         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1484
1485         /*
1486          * NB Until this point, the whole of the incoming message,
1487          * including buflens, status etc is in the sender's byte order.
1488          */
1489         rc = sptlrpc_cli_unwrap_reply(req);
1490         if (rc) {
1491                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1492                 RETURN(rc);
1493         }
1494
1495         /*
1496          * Security layer unwrap might ask to resend this request.
1497          */
1498         if (req->rq_resend)
1499                 RETURN(0);
1500
1501         rc = unpack_reply(req);
1502         if (rc)
1503                 RETURN(rc);
1504
1505         /* retry indefinitely on EINPROGRESS */
1506         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1507             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1508                 time64_t now = ktime_get_real_seconds();
1509
1510                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1511                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1512                 spin_lock(&req->rq_lock);
1513                 req->rq_resend = 1;
1514                 spin_unlock(&req->rq_lock);
1515                 req->rq_nr_resend++;
1516
1517                 /* Readjust the timeout for current conditions */
1518                 ptlrpc_at_set_req_timeout(req);
1519                 /*
1520                  * delay the resend to give the server a chance to get ready.
1521                  * The delay is increased by 1s on every resend and is capped to
1522                  * the current request timeout (i.e. obd_timeout if AT is off,
1523                  * or AT service time x 125% + 5s, see at_est2timeout)
1524                  */
1525                 if (req->rq_nr_resend > req->rq_timeout)
1526                         req->rq_sent = now + req->rq_timeout;
1527                 else
1528                         req->rq_sent = now + req->rq_nr_resend;
1529
1530                 /* Resend for EINPROGRESS will use a new XID */
1531                 spin_lock(&imp->imp_lock);
1532                 list_del_init(&req->rq_unreplied_list);
1533                 spin_unlock(&imp->imp_lock);
1534
1535                 RETURN(0);
1536         }
1537
1538         if (obd->obd_svc_stats) {
1539                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1540                                     timediff);
1541                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1542         }
1543
1544         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1545             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1546                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1547                           lustre_msg_get_type(req->rq_repmsg));
1548                 RETURN(-EPROTO);
1549         }
1550
1551         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1552                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
1553         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1554         ptlrpc_at_adj_net_latency(req,
1555                                   lustre_msg_get_service_timeout(req->rq_repmsg));
1556
1557         rc = ptlrpc_check_status(req);
1558
1559         if (rc) {
1560                 /*
1561                  * Either we've been evicted, or the server has failed for
1562                  * some reason. Try to reconnect, and if that fails, punt to
1563                  * the upcall.
1564                  */
1565                 if (ptlrpc_recoverable_error(rc)) {
1566                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1567                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1568                                 RETURN(rc);
1569                         }
1570                         ptlrpc_request_handle_notconn(req);
1571                         RETURN(rc);
1572                 }
1573         } else {
1574                 /*
1575                  * Check whether the server sent an SLV (server lock
1576                  * volume). Do it only for RPCs with rc == 0.
1577                  */
1578                 ldlm_cli_update_pool(req);
1579         }
1580
1581         /*
1582          * Store transno in reqmsg for replay.
1583          */
1584         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1585                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1586                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1587         }
1588
1589         if (lustre_msg_get_transno(req->rq_repmsg) ||
1590             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_ENQUEUE)
1591                 imp->imp_no_cached_data = 0;
1592
1593         if (imp->imp_replayable) {
1594                 /* if other threads are waiting for ptlrpc_free_committed()
1595                  * they could continue the work of freeing RPCs. That reduces
1596                  * lock hold times, and distributes work more fairly across
1597                  * waiting threads.  We can't use spin_is_contended() since
1598                  * there are many other places where imp_lock is held.
1599                  */
1600                 atomic_inc(&imp->imp_waiting);
1601                 spin_lock(&imp->imp_lock);
1602                 atomic_dec(&imp->imp_waiting);
1603                 /*
1604                  * No point in adding already-committed requests to the replay
1605                  * list, we will just remove them immediately. b=9829
1606                  */
1607                 if (req->rq_transno != 0 &&
1608                     (req->rq_transno >
1609                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1610                      req->rq_replay)) {
1611                         /** version recovery */
1612                         ptlrpc_save_versions(req);
1613                         ptlrpc_retain_replayable_request(req, imp);
1614                 } else if (req->rq_commit_cb &&
1615                            list_empty(&req->rq_replay_list)) {
1616                         /*
1617                          * NB: don't call rq_commit_cb if it's already on
1618                          * rq_replay_list, ptlrpc_free_committed() will call
1619                          * it later, see LU-3618 for details
1620                          */
1621                         spin_unlock(&imp->imp_lock);
1622                         req->rq_commit_cb(req);
1623                         atomic_inc(&imp->imp_waiting);
1624                         spin_lock(&imp->imp_lock);
1625                         atomic_dec(&imp->imp_waiting);
1626                 }
1627
1628                 /*
1629                  * Replay-enabled imports return commit-status information.
1630                  */
1631                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1632                 if (likely(committed > imp->imp_peer_committed_transno))
1633                         imp->imp_peer_committed_transno = committed;
1634
1635                 ptlrpc_free_committed(imp);
1636
1637                 if (!list_empty(&imp->imp_replay_list)) {
1638                         struct ptlrpc_request *last;
1639
1640                         last = list_entry(imp->imp_replay_list.prev,
1641                                           struct ptlrpc_request,
1642                                           rq_replay_list);
1643                         /*
1644                          * Requests with rq_replay stay on the list even if no
1645                          * commit is expected.
1646                          */
1647                         if (last->rq_transno > imp->imp_peer_committed_transno)
1648                                 ptlrpc_pinger_commit_expected(imp);
1649                 }
1650
1651                 spin_unlock(&imp->imp_lock);
1652         }
1653
1654         RETURN(rc);
1655 }
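
/*
 * Worked example (added for illustration, not part of the original file):
 * with the EINPROGRESS handling above and rq_timeout = 10s, the resend
 * delay grows by 1s per retry and is then capped at the request timeout:
 *
 *        resend #1  ->  rq_sent = now + 1
 *        resend #2  ->  rq_sent = now + 2
 *        ...
 *        resend #10 ->  rq_sent = now + 10
 *        resend #11 ->  rq_sent = now + 10   (capped at rq_timeout)
 */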
1656
1657 /**
1658  * Helper function to send request \a req over the network for the first time.
1659  * Also adjusts request phase.
1660  * Returns 0 on success or error code.
1661  */
1662 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1663 {
1664         struct obd_import *imp = req->rq_import;
1665         __u64 min_xid = 0;
1666         int rc;
1667
1668         ENTRY;
1669         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1670
1671         /* do not try to go further if there is not enough memory in enc_pool */
1672         if (req->rq_sent && req->rq_bulk)
1673                 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1674                     pool_is_at_full_capacity())
1675                         RETURN(-ENOMEM);
1676
1677         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1678             (!req->rq_generation_set ||
1679              req->rq_import_generation == imp->imp_generation))
1680                 RETURN(0);
1681
1682         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1683
1684         spin_lock(&imp->imp_lock);
1685
1686         LASSERT(req->rq_xid != 0);
1687         LASSERT(!list_empty(&req->rq_unreplied_list));
1688
1689         if (!req->rq_generation_set)
1690                 req->rq_import_generation = imp->imp_generation;
1691
1692         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1693                 spin_lock(&req->rq_lock);
1694                 req->rq_waiting = 1;
1695                 spin_unlock(&req->rq_lock);
1696
1697                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1698                           ptlrpc_import_state_name(req->rq_send_state),
1699                           ptlrpc_import_state_name(imp->imp_state));
1700                 LASSERT(list_empty(&req->rq_list));
1701                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1702                 atomic_inc(&req->rq_import->imp_inflight);
1703                 spin_unlock(&imp->imp_lock);
1704                 RETURN(0);
1705         }
1706
1707         if (rc != 0) {
1708                 spin_unlock(&imp->imp_lock);
1709                 req->rq_status = rc;
1710                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1711                 RETURN(rc);
1712         }
1713
1714         LASSERT(list_empty(&req->rq_list));
1715         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1716         atomic_inc(&req->rq_import->imp_inflight);
1717
1718         /*
1719          * Find the known replied XID from the unreplied list.  CONNECT
1720          * and DISCONNECT requests are skipped to keep the sanity check
1721          * on the server side happy; see process_req_last_xid().
1722          *
1723          * For CONNECT: because replay requests have lower XIDs, the
1724          * sanity check would break if CONNECT bumped the exp_last_xid
1725          * on the server.
1726          *
1727          * For DISCONNECT: since the client aborts in-flight RPCs before
1728          * sending DISCONNECT, DISCONNECT may carry an XID higher than
1729          * those of the in-flight RPCs.
1730          */
1731         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1732                 min_xid = ptlrpc_known_replied_xid(imp);
1733         spin_unlock(&imp->imp_lock);
1734
1735         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1736
1737         lustre_msg_set_status(req->rq_reqmsg, current->pid);
1738
1739         /* If the request to be sent is an LDLM callback, do not try to
1740          * refresh context.
1741          * An LDLM callback is sent by a server to a client in order to make
1742          * it release a lock, on a communication channel that uses a reverse
1743          * context. It cannot be refreshed on its own, as it is the 'reverse'
1744          * (server-side) representation of a client context.
1745          * We do not care if the reverse context is expired, and want to send
1746          * the LDLM callback anyway. Once the client receives the AST, it is
1747          * its job to refresh its own context if it has expired, hence
1748          * refreshing the associated reverse context on server side, before
1749          * being able to send the LDLM_CANCEL requested by the server.
1750          */
1751         if (lustre_msg_get_opc(req->rq_reqmsg) != LDLM_BL_CALLBACK &&
1752             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_CP_CALLBACK &&
1753             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_GL_CALLBACK)
1754                 rc = sptlrpc_req_refresh_ctx(req, 0);
1755         if (rc) {
1756                 if (req->rq_err) {
1757                         req->rq_status = rc;
1758                         RETURN(1);
1759                 } else {
1760                         spin_lock(&req->rq_lock);
1761                         req->rq_wait_ctx = 1;
1762                         spin_unlock(&req->rq_lock);
1763                         RETURN(0);
1764                 }
1765         }
1766
1767         CDEBUG(D_RPCTRACE,
1768                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1769                req, current->comm,
1770                imp->imp_obd->obd_uuid.uuid,
1771                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1772                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1773                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1774
1775         rc = ptl_send_rpc(req, 0);
1776         if (rc == -ENOMEM) {
1777                 spin_lock(&imp->imp_lock);
1778                 if (!list_empty(&req->rq_list)) {
1779                         list_del_init(&req->rq_list);
1780                         if (atomic_dec_and_test(&req->rq_import->imp_inflight))
1781                                 wake_up(&req->rq_import->imp_recovery_waitq);
1782                 }
1783                 spin_unlock(&imp->imp_lock);
1784                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1785                 RETURN(rc);
1786         }
1787         if (rc) {
1788                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1789                           rc);
1790                 spin_lock(&req->rq_lock);
1791                 req->rq_net_err = 1;
1792                 spin_unlock(&req->rq_lock);
1793                 RETURN(rc);
1794         }
1795         RETURN(0);
1796 }
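
/*
 * Illustrative note (not from the original file): a request normally
 * reaches ptlrpc_send_new_req() either synchronously via ptlrpc_set_wait()
 * below, or asynchronously by handing it to a ptlrpcd thread, which then
 * drives it through ptlrpc_check_set():
 *
 *        req->rq_interpret_reply = my_interpret;
 *        ptlrpcd_add_req(req);
 *
 * my_interpret() is a hypothetical completion callback supplied by the
 * caller.
 */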
1797
1798 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1799 {
1800         int remaining, rc;
1801
1802         ENTRY;
1803         LASSERT(set->set_producer != NULL);
1804
1805         remaining = atomic_read(&set->set_remaining);
1806
1807         /*
1808          * populate the ->set_requests list with requests until we
1809          * reach the maximum number of RPCs in flight for this set
1810          */
1811         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1812                 rc = set->set_producer(set, set->set_producer_arg);
1813                 if (rc == -ENOENT) {
1814                         /* no more RPC to produce */
1815                         set->set_producer     = NULL;
1816                         set->set_producer_arg = NULL;
1817                         RETURN(0);
1818                 }
1819         }
1820
1821         RETURN((atomic_read(&set->set_remaining) - remaining));
1822 }
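
/*
 * Illustrative sketch (an assumption for documentation, not part of the
 * original file): a producer callback is expected to add one request to
 * \a set per call and to return -ENOENT once it has nothing left to
 * produce, roughly:
 *
 *        static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *        {
 *                struct ptlrpc_request *req = my_next_request(arg);
 *
 *                if (req == NULL)
 *                        return -ENOENT;
 *                ptlrpc_set_add_req(set, req);
 *                return 0;
 *        }
 *
 * my_producer() and my_next_request() are hypothetical names.
 */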
1823
1824 /**
1825  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1826  * and no more replies are expected.
1827  * (It is possible to get fewer replies than requests sent, e.g. due to timed
1828  * out requests or requests that we had trouble sending out.)
1829  *
1830  * NOTE: This function contains a potential schedule point (cond_resched()).
1831  */
1832 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1833 {
1834         struct ptlrpc_request *req, *next;
1835         LIST_HEAD(comp_reqs);
1836         int force_timer_recalc = 0;
1837
1838         ENTRY;
1839         if (atomic_read(&set->set_remaining) == 0)
1840                 RETURN(1);
1841
1842         list_for_each_entry_safe(req, next, &set->set_requests,
1843                                  rq_set_chain) {
1844                 struct obd_import *imp = req->rq_import;
1845                 int unregistered = 0;
1846                 int async = 1;
1847                 int rc = 0;
1848
1849                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1850                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1851                         continue;
1852                 }
1853
1854                 /*
1855                  * This schedule point is mainly for the ptlrpcd caller of this
1856                  * function.  Most ptlrpc sets are neither long-lived nor
1857                  * unbounded in length, but the set used by ptlrpcd is.
1858                  * Since the processing time is unbounded, we need to insert an
1859                  * explicit schedule point to make the thread well-behaved.
1860                  */
1861                 cond_resched();
1862
1863                 /*
1864                  * If the request allows interruption and it really has
1865                  * been interrupted, then move the request to the
1866                  * RQ_PHASE_INTERPRET phase regardless of what the current
1867                  * phase is.
1868                  */
1869                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1870                         req->rq_status = -EINTR;
1871                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1872
1873                         /*
1874                          * Since it is being interpreted and we have to wait
1875                          * for the reply to be unlinked, use sync mode.
1876                          */
1877                         async = 0;
1878
1879                         GOTO(interpret, req->rq_status);
1880                 }
1881
1882                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1883                         force_timer_recalc = 1;
1884
1885                 /* delayed send - skip */
1886                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1887                         continue;
1888
1889                 /* delayed resend - skip */
1890                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1891                     req->rq_sent > ktime_get_real_seconds())
1892                         continue;
1893
1894                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1895                       req->rq_phase == RQ_PHASE_BULK ||
1896                       req->rq_phase == RQ_PHASE_INTERPRET ||
1897                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1898                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1899                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1900                         LBUG();
1901                 }
1902
1903                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1904                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1905                         LASSERT(req->rq_next_phase != req->rq_phase);
1906                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1907
1908                         if (req->rq_req_deadline &&
1909                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1910                                 req->rq_req_deadline = 0;
1911                         if (req->rq_reply_deadline &&
1912                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1913                                 req->rq_reply_deadline = 0;
1914                         if (req->rq_bulk_deadline &&
1915                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1916                                 req->rq_bulk_deadline = 0;
1917
1918                         /*
1919                          * Skip processing until reply is unlinked. We
1920                          * can't return to pool before that and we can't
1921                          * call interpret before that. We need to make
1922                          * sure that all rdma transfers finished and will
1923                          * sure that all RDMA transfers have finished and will
1924                          */
1925                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1926                             ptlrpc_cli_wait_unlink(req))
1927                                 continue;
1928                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1929                             ptlrpc_client_bulk_active(req))
1930                                 continue;
1931
1932                         /*
1933                          * Turn fail_loc off to prevent it from looping
1934                          * forever.
1935                          */
1936                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1937                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1938                                                      CFS_FAIL_ONCE);
1939                         }
1940                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1941                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1942                                                      CFS_FAIL_ONCE);
1943                         }
1944
1945                         /*
1946                          * Move to next phase if reply was successfully
1947                          * unlinked.
1948                          */
1949                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1950                 }
1951
1952                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1953                         GOTO(interpret, req->rq_status);
1954
1955                 /*
1956                  * Note that this also will start async reply unlink.
1957                  */
1958                 if (req->rq_net_err && !req->rq_timedout) {
1959                         ptlrpc_expire_one_request(req, 1);
1960
1961                         /*
1962                          * Check if we still need to wait for unlink.
1963                          */
1964                         if (ptlrpc_cli_wait_unlink(req) ||
1965                             ptlrpc_client_bulk_active(req))
1966                                 continue;
1967                         /* If there is no need to resend, fail it now. */
1968                         if (req->rq_no_resend) {
1969                                 if (req->rq_status == 0)
1970                                         req->rq_status = -EIO;
1971                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1972                                 GOTO(interpret, req->rq_status);
1973                         } else {
1974                                 continue;
1975                         }
1976                 }
1977
1978                 if (req->rq_err) {
1979                         if (!ptlrpc_unregister_reply(req, 1)) {
1980                                 ptlrpc_unregister_bulk(req, 1);
1981                                 continue;
1982                         }
1983
1984                         spin_lock(&req->rq_lock);
1985                         req->rq_replied = 0;
1986                         spin_unlock(&req->rq_lock);
1987                         if (req->rq_status == 0)
1988                                 req->rq_status = -EIO;
1989                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1990                         GOTO(interpret, req->rq_status);
1991                 }
1992
1993                 /*
1994                  * ptlrpc_set_wait uses l_wait_event_abortable_timeout()
1995                  * so it sets rq_intr regardless of individual rpc
1996                  * timeouts. The synchronous IO waiting path sets
1997                  * rq_intr irrespective of whether ptlrpcd
1998                  * has seen a timeout.  Our policy is to only interpret
1999                  * interrupted rpcs after they have timed out, so we
2000                  * need to enforce that here.
2001                  */
2002
2003                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
2004                                      req->rq_wait_ctx)) {
2005                         req->rq_status = -EINTR;
2006                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2007                         GOTO(interpret, req->rq_status);
2008                 }
2009
2010                 if (req->rq_phase == RQ_PHASE_RPC) {
2011                         if (req->rq_timedout || req->rq_resend ||
2012                             req->rq_waiting || req->rq_wait_ctx) {
2013                                 int status;
2014
2015                                 if (!ptlrpc_unregister_reply(req, 1)) {
2016                                         ptlrpc_unregister_bulk(req, 1);
2017                                         continue;
2018                                 }
2019
2020                                 spin_lock(&imp->imp_lock);
2021                                 if (ptlrpc_import_delay_req(imp, req,
2022                                                             &status)) {
2023                                         /*
2024                                          * put on the delayed list - only while we
2025                                          * wait for recovery to finish - before sending
2026                                          */
2027                                         list_move_tail(&req->rq_list,
2028                                                        &imp->imp_delayed_list);
2029                                         spin_unlock(&imp->imp_lock);
2030                                         continue;
2031                                 }
2032
2033                                 if (status != 0)  {
2034                                         req->rq_status = status;
2035                                         ptlrpc_rqphase_move(req,
2036                                                             RQ_PHASE_INTERPRET);
2037                                         spin_unlock(&imp->imp_lock);
2038                                         GOTO(interpret, req->rq_status);
2039                                 }
2040                                 /* ignore on just initiated connections */
2041                                 if (ptlrpc_no_resend(req) &&
2042                                     !req->rq_wait_ctx &&
2043                                     imp->imp_generation !=
2044                                     imp->imp_initiated_at) {
2045                                         req->rq_status = -ENOTCONN;
2046                                         ptlrpc_rqphase_move(req,
2047                                                             RQ_PHASE_INTERPRET);
2048                                         spin_unlock(&imp->imp_lock);
2049                                         GOTO(interpret, req->rq_status);
2050                                 }
2051
2052                                 /* don't resend too fast in case of network
2053                                  * errors.
2054                                  */
2055                                 if (ktime_get_real_seconds() < (req->rq_sent + 1)
2056                                     && req->rq_net_err && req->rq_timedout) {
2057
2058                                         DEBUG_REQ(D_INFO, req,
2059                                                   "throttle request");
2060                                         /* Don't try to resend RPC right away
2061                                          * as it is likely it will fail again
2062                                          * and ptlrpc_check_set() will be
2063                                          * called again, keeping this thread
2064                                          * busy. Instead, wait for the next
2065                                          * timeout. Flag it as resend to
2066                                          * ensure we don't wait too long.
2067                                          */
2068                                         req->rq_resend = 1;
2069                                         spin_unlock(&imp->imp_lock);
2070                                         continue;
2071                                 }
2072
2073                                 list_move_tail(&req->rq_list,
2074                                                &imp->imp_sending_list);
2075
2076                                 spin_unlock(&imp->imp_lock);
2077
2078                                 spin_lock(&req->rq_lock);
2079                                 req->rq_waiting = 0;
2080                                 spin_unlock(&req->rq_lock);
2081
2082                                 if (req->rq_timedout || req->rq_resend) {
2083                                         /*
2084                                          * This is re-sending anyways,
2085                                          * let's mark req as resend.
2086                                          */
2087                                         spin_lock(&req->rq_lock);
2088                                         req->rq_resend = 1;
2089                                         spin_unlock(&req->rq_lock);
2090                                 }
2091                                 /*
2092                                  * rq_wait_ctx is only touched by ptlrpcd,
2093                                  * so no lock is needed here.
2094                                  */
2095                                 status = sptlrpc_req_refresh_ctx(req, 0);
2096                                 if (status) {
2097                                         if (req->rq_err) {
2098                                                 req->rq_status = status;
2099                                                 spin_lock(&req->rq_lock);
2100                                                 req->rq_wait_ctx = 0;
2101                                                 spin_unlock(&req->rq_lock);
2102                                                 force_timer_recalc = 1;
2103                                         } else {
2104                                                 spin_lock(&req->rq_lock);
2105                                                 req->rq_wait_ctx = 1;
2106                                                 spin_unlock(&req->rq_lock);
2107                                         }
2108
2109                                         continue;
2110                                 } else {
2111                                         spin_lock(&req->rq_lock);
2112                                         req->rq_wait_ctx = 0;
2113                                         spin_unlock(&req->rq_lock);
2114                                 }
2115
2116                                 /*
2117                                  * In any case, the previous bulk should be
2118                                  * cleaned up to prepare for the new sending
2119                                  */
2120                                 if (req->rq_bulk &&
2121                                     !ptlrpc_unregister_bulk(req, 1))
2122                                         continue;
2123
2124                                 rc = ptl_send_rpc(req, 0);
2125                                 if (rc == -ENOMEM) {
2126                                         spin_lock(&imp->imp_lock);
2127                                         if (!list_empty(&req->rq_list))
2128                                                 list_del_init(&req->rq_list);
2129                                         spin_unlock(&imp->imp_lock);
2130                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2131                                         continue;
2132                                 }
2133                                 if (rc) {
2134                                         DEBUG_REQ(D_HA, req,
2135                                                   "send failed: rc = %d", rc);
2136                                         force_timer_recalc = 1;
2137                                         spin_lock(&req->rq_lock);
2138                                         req->rq_net_err = 1;
2139                                         spin_unlock(&req->rq_lock);
2140                                         continue;
2141                                 }
2142                                 /* need to reset the timeout */
2143                                 force_timer_recalc = 1;
2144                         }
2145
2146                         spin_lock(&req->rq_lock);
2147
2148                         if (ptlrpc_client_early(req)) {
2149                                 ptlrpc_at_recv_early_reply(req);
2150                                 spin_unlock(&req->rq_lock);
2151                                 continue;
2152                         }
2153
2154                         /* Still waiting for a reply? */
2155                         if (ptlrpc_client_recv(req)) {
2156                                 spin_unlock(&req->rq_lock);
2157                                 continue;
2158                         }
2159
2160                         /* Did we actually receive a reply? */
2161                         if (!ptlrpc_client_replied(req)) {
2162                                 spin_unlock(&req->rq_lock);
2163                                 continue;
2164                         }
2165
2166                         spin_unlock(&req->rq_lock);
2167
2168                         /*
2169                          * unlink from the net because we are going to
2170                          * swab the reply buffer in place
2171                          */
2172                         unregistered = ptlrpc_unregister_reply(req, 1);
2173                         if (!unregistered)
2174                                 continue;
2175
2176                         req->rq_status = after_reply(req);
2177                         if (req->rq_resend) {
2178                                 force_timer_recalc = 1;
2179                                 continue;
2180                         }
2181
2182                         /*
2183                          * If there is no bulk associated with this request,
2184                          * then we're done and should let the interpreter
2185                          * process the reply. Similarly if the RPC returned
2186                          * an error, and therefore the bulk will never arrive.
2187                          */
2188                         if (!req->rq_bulk || req->rq_status < 0) {
2189                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2190                                 GOTO(interpret, req->rq_status);
2191                         }
2192
2193                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2194                 }
2195
2196                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2197                 if (ptlrpc_client_bulk_active(req))
2198                         continue;
2199
2200                 if (req->rq_bulk->bd_failure) {
2201                         /*
2202                          * The RPC reply arrived OK, but the bulk transfer
2203                          * failed!  Odd, since the server told us the RPC
2204                          * was good after getting the REPLY for its GET or
2205                          * the ACK for its PUT.
2206                          */
2207                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed %d/%d/%d",
2208                                   req->rq_status,
2209                                   req->rq_bulk->bd_nob,
2210                                   req->rq_bulk->bd_nob_transferred);
2211                         req->rq_status = -EIO;
2212                 }
2213
2214                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2215
2216 interpret:
2217                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2218
2219                 /*
2220                  * This moves to the "unregistering" phase; we need to wait
2221                  * for the reply to be unlinked.
2222                  */
2223                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2224                         /* start async bulk unlink too */
2225                         ptlrpc_unregister_bulk(req, 1);
2226                         continue;
2227                 }
2228
2229                 if (!ptlrpc_unregister_bulk(req, async))
2230                         continue;
2231
2232                 /*
2233                  * By the time interpret is called, receiving should
2234                  * already be finished.
2235                  */
2236                 LASSERT(!req->rq_receiving_reply);
2237
2238                 ptlrpc_req_interpret(env, req, req->rq_status);
2239
2240                 if (ptlrpcd_check_work(req)) {
2241                         atomic_dec(&set->set_remaining);
2242                         continue;
2243                 }
2244                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2245
2246                 if (req->rq_reqmsg)
2247                         CDEBUG(D_RPCTRACE,
2248                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2249                                req, current->comm,
2250                                imp->imp_obd->obd_uuid.uuid,
2251                                lustre_msg_get_status(req->rq_reqmsg),
2252                                req->rq_xid,
2253                                obd_import_nid2str(imp),
2254                                lustre_msg_get_opc(req->rq_reqmsg),
2255                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2256
2257                 spin_lock(&imp->imp_lock);
2258                 /*
2259                  * The request may no longer be on the sending or delayed
2260                  * list. This may happen when it was marked erroneous because
2261                  * ptlrpc_import_delay_req(req, status) found it impossible to
2262                  * allow sending this RPC and returned *status != 0.
2263                  */
2264                 if (!list_empty(&req->rq_list)) {
2265                         list_del_init(&req->rq_list);
2266                         if (atomic_dec_and_test(&imp->imp_inflight))
2267                                 wake_up(&imp->imp_recovery_waitq);
2268                 }
2269                 list_del_init(&req->rq_unreplied_list);
2270                 spin_unlock(&imp->imp_lock);
2271
2272                 atomic_dec(&set->set_remaining);
2273                 wake_up(&imp->imp_recovery_waitq);
2274
2275                 if (set->set_producer) {
2276                         /* produce a new request if possible */
2277                         if (ptlrpc_set_producer(set) > 0)
2278                                 force_timer_recalc = 1;
2279
2280                         /*
2281                          * free the request that has just been completed
2282                          * in order not to pollute set->set_requests
2283                          */
2284                         list_del_init(&req->rq_set_chain);
2285                         spin_lock(&req->rq_lock);
2286                         req->rq_set = NULL;
2287                         req->rq_invalid_rqset = 0;
2288                         spin_unlock(&req->rq_lock);
2289
2290                         /* record rq_status to compute the final status later */
2291                         if (req->rq_status != 0)
2292                                 set->set_rc = req->rq_status;
2293                         ptlrpc_req_finished(req);
2294                 } else {
2295                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2296                 }
2297         }
2298
2299         /*
2300          * move completed requests to the head of the list so it's
2301          * easier for the caller to find them
2302          */
2303         list_splice(&comp_reqs, &set->set_requests);
2304
2305         /* If we hit an error, we want to recover promptly. */
2306         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2307 }
2308 EXPORT_SYMBOL(ptlrpc_check_set);
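
/*
 * Usage note (illustrative, not from the original file): callers that
 * drive a set by hand typically use ptlrpc_check_set() as the wake-up
 * condition on set->set_waitq, exactly as ptlrpc_set_wait() does below:
 *
 *        rc = wait_event_idle_timeout(set->set_waitq,
 *                                     ptlrpc_check_set(NULL, set),
 *                                     cfs_time_seconds(timeout));
 */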
2309
2310 /**
2311  * Time out request \a req. If \a async_unlink is set, do not wait
2312  * until LNet actually confirms network buffer unlinking.
2313  * Return 1 if we should give up further retrying attempts or 0 otherwise.
2314  */
2315 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2316 {
2317         struct obd_import *imp = req->rq_import;
2318         unsigned int debug_mask = D_RPCTRACE;
2319         int rc = 0;
2320         __u32 opc;
2321
2322         ENTRY;
2323         spin_lock(&req->rq_lock);
2324         req->rq_timedout = 1;
2325         spin_unlock(&req->rq_lock);
2326
2327         opc = lustre_msg_get_opc(req->rq_reqmsg);
2328         if (ptlrpc_console_allow(req, opc,
2329                                  lustre_msg_get_status(req->rq_reqmsg)))
2330                 debug_mask = D_WARNING;
2331         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2332                   req->rq_net_err ? "failed due to network error" :
2333                      ((req->rq_real_sent == 0 ||
2334                        req->rq_real_sent < req->rq_sent ||
2335                        req->rq_real_sent >= req->rq_deadline) ?
2336                       "timed out for sent delay" : "timed out for slow reply"),
2337                   req->rq_sent, req->rq_real_sent);
2338
2339         if (imp && obd_debug_peer_on_timeout)
2340                 LNetDebugPeer(&imp->imp_connection->c_peer);
2341
2342         ptlrpc_unregister_reply(req, async_unlink);
2343         ptlrpc_unregister_bulk(req, async_unlink);
2344
2345         if (obd_dump_on_timeout)
2346                 libcfs_debug_dumplog();
2347
2348         if (!imp) {
2349                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2350                 RETURN(1);
2351         }
2352
2353         atomic_inc(&imp->imp_timeouts);
2354
2355         /* The DLM server doesn't want recovery run on its imports. */
2356         if (imp->imp_dlm_fake)
2357                 RETURN(1);
2358
2359         /*
2360          * If this request is for recovery or other primordial tasks,
2361          * then error it out here.
2362          */
2363         if (req->rq_ctx_init || req->rq_ctx_fini ||
2364             req->rq_send_state != LUSTRE_IMP_FULL ||
2365             imp->imp_obd->obd_no_recov) {
2366                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2367                           ptlrpc_import_state_name(req->rq_send_state),
2368                           ptlrpc_import_state_name(imp->imp_state));
2369                 spin_lock(&req->rq_lock);
2370                 req->rq_status = -ETIMEDOUT;
2371                 req->rq_err = 1;
2372                 spin_unlock(&req->rq_lock);
2373                 RETURN(1);
2374         }
2375
2376         /*
2377          * if a request can't be resent we can't wait for an answer after
2378          * the timeout
2379          */
2380         if (ptlrpc_no_resend(req)) {
2381                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2382                 rc = 1;
2383         }
2384
2385         if (opc != OBD_PING || req->rq_xid > imp->imp_highest_replied_xid)
2386                 ptlrpc_fail_import(imp,
2387                                    lustre_msg_get_conn_cnt(req->rq_reqmsg));
2388
2389         RETURN(rc);
2390 }
2391
2392 /**
2393  * Time out all uncompleted requests in the request set pointed to by \a set.
2394  * This is called when a wait times out.
2395  */
2396 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
2397 {
2398         struct ptlrpc_request *req;
2399         time64_t now = ktime_get_real_seconds();
2400
2401         ENTRY;
2402         LASSERT(set != NULL);
2403
2404         /*
2405          * A timeout expired. See which reqs it applies to...
2406          */
2407         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2408                 /* don't expire request waiting for context */
2409                 if (req->rq_wait_ctx)
2410                         continue;
2411
2412                 /* Request in-flight? */
2413                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2414                        !req->rq_waiting && !req->rq_resend) ||
2415                       (req->rq_phase == RQ_PHASE_BULK)))
2416                         continue;
2417
2418                 if (req->rq_timedout ||     /* already dealt with */
2419                     req->rq_deadline > now) /* not expired */
2420                         continue;
2421
2422                 /*
2423                  * Deal with this guy. Do it asynchronously to not block
2424                  * ptlrpcd thread.
2425                  */
2426                 ptlrpc_expire_one_request(req, 1);
2427                 /*
2428                  * Loops require that we resched once in a while to avoid
2429                  * RCU stalls and a few other problems.
2430                  */
2431                 cond_resched();
2432
2433         }
2434 }
2435
2436 /**
2437  * Interrupts (sets interrupted flag) all uncompleted requests in
2438  * a set \a data. This is called when a wait_event is interrupted
2439  * by a signal.
2440  */
2441 static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
2442 {
2443         struct ptlrpc_request *req;
2444
2445         LASSERT(set != NULL);
2446         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2447
2448         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2449                 if (req->rq_intr)
2450                         continue;
2451
2452                 if (req->rq_phase != RQ_PHASE_RPC &&
2453                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2454                     !req->rq_allow_intr)
2455                         continue;
2456
2457                 spin_lock(&req->rq_lock);
2458                 req->rq_intr = 1;
2459                 spin_unlock(&req->rq_lock);
2460         }
2461 }
2462
2463 /**
2464  * Get the smallest timeout in the set; this does NOT set a timeout.
2465  */
2466 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2467 {
2468         time64_t now = ktime_get_real_seconds();
2469         int timeout = 0;
2470         struct ptlrpc_request *req;
2471         time64_t deadline;
2472
2473         ENTRY;
2474         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2475                 /* Request in-flight? */
2476                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2477                       (req->rq_phase == RQ_PHASE_BULK) ||
2478                       (req->rq_phase == RQ_PHASE_NEW)))
2479                         continue;
2480
2481                 /* Already timed out. */
2482                 if (req->rq_timedout)
2483                         continue;
2484
2485                 /* Waiting for ctx. */
2486                 if (req->rq_wait_ctx)
2487                         continue;
2488
2489                 if (req->rq_phase == RQ_PHASE_NEW)
2490                         deadline = req->rq_sent;
2491                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2492                         deadline = req->rq_sent;
2493                 else
2494                         deadline = req->rq_sent + req->rq_timeout;
2495
2496                 if (deadline <= now)    /* actually expired already */
2497                         timeout = 1;    /* ASAP */
2498                 else if (timeout == 0 || timeout > deadline - now)
2499                         timeout = deadline - now;
2500         }
2501         RETURN(timeout);
2502 }
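
/*
 * Worked example (added for illustration, not part of the original file):
 * if now = 1000s and the set holds one RPC sent at 990s with
 * rq_timeout = 30s (deadline 1020s) and another flagged for resend with
 * rq_sent = 1005s, the deadlines considered are 1020 and 1005, so the
 * function returns 5 - the soonest deadline minus now.  A request whose
 * deadline has already passed makes it return 1 (ASAP).
 */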
2503
2504 /**
2505  * Send all unsent requests from the set and then wait until all
2506  * requests in the set complete (either get a reply, time out, get an
2507  * error, or otherwise be interrupted).
2508  * Returns 0 on success or error code otherwise.
2509  */
2510 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2511 {
2512         struct ptlrpc_request *req;
2513         time64_t timeout;
2514         int rc;
2515
2516         ENTRY;
2517         if (set->set_producer)
2518                 (void)ptlrpc_set_producer(set);
2519         else
2520                 list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2521                         if (req->rq_phase == RQ_PHASE_NEW)
2522                                 (void)ptlrpc_send_new_req(req);
2523                 }
2524
2525         if (list_empty(&set->set_requests))
2526                 RETURN(0);
2527
2528         do {
2529                 timeout = ptlrpc_set_next_timeout(set);
2530
2531                 /*
2532                  * wait until all requests complete or are interrupted,
2533                  * or an in-flight req times out
2534                  */
2535                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2536                        set, timeout);
2537
2538                 if ((timeout == 0 && !signal_pending(current)) ||
2539                     set->set_allow_intr) {
2540                         /*
2541                          * No requests are in-flight (either timed out
2542                          * or delayed), so we can allow interrupts.
2543                          * We still want to block for a limited time,
2544                          * so we allow interrupts during the timeout.
2545                          */
2546                         rc = l_wait_event_abortable_timeout(
2547                                 set->set_waitq,
2548                                 ptlrpc_check_set(NULL, set),
2549                                 cfs_time_seconds(timeout ? timeout : 1));
2550                         if (rc == 0) {
2551                                 rc = -ETIMEDOUT;
2552                                 ptlrpc_expired_set(set);
2553                         } else if (rc < 0) {
2554                                 rc = -EINTR;
2555                                 ptlrpc_interrupted_set(set);
2556                         } else {
2557                                 rc = 0;
2558                         }
2559                 } else {
2560                         /*
2561                          * At least one request is in flight, so no
2562                          * interrupts are allowed. Wait until all
2563                          * complete, or an in-flight req times out.
2564                          */
2565                         rc = wait_event_idle_timeout(
2566                                 set->set_waitq,
2567                                 ptlrpc_check_set(NULL, set),
2568                                 cfs_time_seconds(timeout ? timeout : 1));
2569                         if (rc == 0) {
2570                                 ptlrpc_expired_set(set);
2571                                 rc = -ETIMEDOUT;
2572                         } else {
2573                                 rc = 0;
2574                         }
2575
2576                         /*
2577                          * LU-769 - if we ignored the signal because
2578                          * it was already pending when we started, we
2579                          * need to handle it now or we risk it being
2580                          * ignored forever
2581                          */
2582                         if (rc == -ETIMEDOUT &&
2583                             signal_pending(current)) {
2584                                 sigset_t old, new;
2585
2586                                 siginitset(&new, LUSTRE_FATAL_SIGS);
2587                                 sigprocmask(SIG_BLOCK, &new, &old);
2588                                 /*
2589                                  * In fact we only interrupt for the
2590                                  * "fatal" signals like SIGINT or
2591                                  * SIGKILL. We still ignore less
2592                                  * important signals since ptlrpc set
2593                                  * is not easily reentrant from
2594                                  * userspace again
2595                                  */
2596                                 if (signal_pending(current))
2597                                         ptlrpc_interrupted_set(set);
2598                                 sigprocmask(SIG_SETMASK, &old, NULL);
2599                         }
2600                 }
2601
2602                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2603
2604                 /*
2605                  * -EINTR => all requests have been flagged rq_intr so next
2606                  * check completes.
2607                  * -ETIMEDOUT => someone timed out.  When all reqs have
2608                  * timed out, signals are enabled allowing completion with
2609                  * EINTR.
2610                  * I don't really care if we go once more round the loop in
2611                  * the error cases -eeb.
2612                  */
2613                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2614                         list_for_each_entry(req, &set->set_requests,
2615                                             rq_set_chain) {
2616                                 spin_lock(&req->rq_lock);
2617                                 req->rq_invalid_rqset = 1;
2618                                 spin_unlock(&req->rq_lock);
2619                         }
2620                 }
2621         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2622
2623         LASSERT(atomic_read(&set->set_remaining) == 0);
2624
2625         rc = set->set_rc; /* rq_status of already freed requests if any */
2626         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2627                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2628                 if (req->rq_status != 0)
2629                         rc = req->rq_status;
2630         }
2631
2632         RETURN(rc);
2633 }
2634 EXPORT_SYMBOL(ptlrpc_set_wait);
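
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical synchronous pattern is to allocate a set, add one or more
 * prepared requests, wait, and destroy the set:
 *
 *        struct ptlrpc_request_set *set;
 *
 *        set = ptlrpc_prep_set();
 *        if (set == NULL)
 *                return -ENOMEM;
 *        ptlrpc_set_add_req(set, req);
 *        rc = ptlrpc_set_wait(env, set);
 *        ptlrpc_set_destroy(set);
 *
 * ptlrpc_prep_set(), ptlrpc_set_add_req() and ptlrpc_set_destroy() are
 * assumed here as the standard set helpers; error handling is elided.
 */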
2635
2636 /**
2637  * Helper function for request freeing.
2638  * Called when request count reached zero and request needs to be freed.
2639  * Removes request from all sorts of sending/replay lists it might be on,
2640  * frees network buffers if any are present.
2641  * If \a locked is set, the caller is already holding the import's imp_lock,
2642  * so we do not need to take it again (for certain list manipulations).
2643  */
2644 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2645 {
2646         ENTRY;
2647
2648         if (!request)
2649                 RETURN_EXIT;
2650
2651         LASSERT(!request->rq_srv_req);
2652         LASSERT(request->rq_export == NULL);
2653         LASSERTF(!request->rq_receiving_reply, "req %px\n", request);
2654         LASSERTF(list_empty(&request->rq_list), "req %px\n", request);
2655         LASSERTF(list_empty(&request->rq_set_chain), "req %px\n", request);
2656         LASSERTF(!request->rq_replay, "req %px\n", request);
2657
2658         req_capsule_fini(&request->rq_pill);
2659
2660         /*
2661          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2662          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2663          */
2664         if (request->rq_import) {
2665                 if (!locked)
2666                         spin_lock(&request->rq_import->imp_lock);
2667                 list_del_init(&request->rq_replay_list);
2668                 list_del_init(&request->rq_unreplied_list);
2669                 if (!locked)
2670                         spin_unlock(&request->rq_import->imp_lock);
2671         }
2672         LASSERTF(list_empty(&request->rq_replay_list), "req %px\n", request);
2673
2674         if (atomic_read(&request->rq_refcount) != 0) {
2675                 DEBUG_REQ(D_ERROR, request,
2676                           "freeing request with nonzero refcount");
2677                 LBUG();
2678         }
2679
2680         if (request->rq_repbuf)
2681                 sptlrpc_cli_free_repbuf(request);
2682
2683         if (request->rq_import) {
2684                 if (!ptlrpcd_check_work(request)) {
2685                         LASSERT(atomic_read(&request->rq_import->imp_reqs) > 0);
2686                         atomic_dec(&request->rq_import->imp_reqs);
2687                 }
2688                 class_import_put(request->rq_import);
2689                 request->rq_import = NULL;
2690         }
2691         if (request->rq_bulk)
2692                 ptlrpc_free_bulk(request->rq_bulk);
2693
2694         if (request->rq_reqbuf || request->rq_clrbuf)
2695                 sptlrpc_cli_free_reqbuf(request);
2696
2697         if (request->rq_cli_ctx)
2698                 sptlrpc_req_put_ctx(request, !locked);
2699
2700         if (request->rq_pool)
2701                 __ptlrpc_free_req_to_pool(request);
2702         else
2703                 ptlrpc_request_cache_free(request);
2704         EXIT;
2705 }
2706
2707 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2708 /**
2709  * Drop one request reference. Must be called with import imp_lock held.
2710  * When the reference count drops to zero, the request is freed.
2711  */
2712 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2713 {
2714         assert_spin_locked(&request->rq_import->imp_lock);
2715         (void)__ptlrpc_req_finished(request, 1);
2716 }
2717
2718 /**
2719  * Helper function
2720  * Drops one reference for request \a request.
2721  * If \a locked is set, the caller holds the import imp_lock.
2722  * Frees the request when the reference count reaches zero.
2723  *
2724  * \retval 1    the request is freed
2725  * \retval 0    some others still hold references on the request
2726  */
2727 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2728 {
2729         int count;
2730
2731         ENTRY;
2732         if (!request)
2733                 RETURN(1);
2734
2735         LASSERT(request != LP_POISON);
2736         LASSERT(request->rq_reqmsg != LP_POISON);
2737
2738         DEBUG_REQ(D_INFO, request, "refcount now %u",
2739                   atomic_read(&request->rq_refcount) - 1);
2740
2741         spin_lock(&request->rq_lock);
2742         count = atomic_dec_return(&request->rq_refcount);
2743         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2744
2745         /*
2746          * For an open RPC, the client does not know the EA size (LOV, ACL,
2747          * and so on) before the reply arrives, so the client has to reserve a
2748          * very large reply buffer. Such a buffer will not be released until
2749          * the RPC is freed. Since the open RPC is replayable, we need to keep
2750          * it on the replay list until close. If a lot of files are opened
2751          * concurrently, the client may run out of memory.
2752          *
2753          * In fact, it is unnecessary to keep the reply buffer for open replay;
2754          * the related EAs have already been saved via mdc_save_lovea() before
2755          * coming here. So it is safe to free the reply buffer somewhat earlier,
2756          * before releasing the RPC, to avoid client OOM. LU-9514
2757          */
2758         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2759                 spin_lock(&request->rq_early_free_lock);
2760                 sptlrpc_cli_free_repbuf(request);
2761                 request->rq_repbuf = NULL;
2762                 request->rq_repbuf_len = 0;
2763                 request->rq_repdata = NULL;
2764                 request->rq_reqdata_len = 0;
2765                 spin_unlock(&request->rq_early_free_lock);
2766         }
2767         spin_unlock(&request->rq_lock);
2768
2769         if (!count)
2770                 __ptlrpc_free_req(request, locked);
2771
2772         RETURN(!count);
2773 }
2774
2775 /**
2776  * Drops one reference count for a request.
2777  */
2778 void ptlrpc_req_finished(struct ptlrpc_request *request)
2779 {
2780         __ptlrpc_req_finished(request, 0);
2781 }
2782 EXPORT_SYMBOL(ptlrpc_req_finished);
2783
2784 /**
2785  * Returns the xid of request \a request
2786  */
2787 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2788 {
2789         return request->rq_xid;
2790 }
2791 EXPORT_SYMBOL(ptlrpc_req_xid);
2792
2793 /**
2794  * Disengage the client's reply buffer from the network
2795  * NB does _NOT_ unregister any client-side bulk.
2796  * IDEMPOTENT, but _not_ safe against concurrent callers.
2797  * The request owner (i.e. the thread doing the I/O) must call...
2798  * Returns 1 if the reply has been unlinked, or 0 if unlinking is still pending.
2799  */
2800 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2801 {
2802         bool discard = false;
2803         /*
2804          * Might sleep.
2805          */
2806         LASSERT(!in_interrupt());
2807
2808         /* Let's setup deadline for reply unlink. */
2809         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2810             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2811                 request->rq_reply_deadline = ktime_get_real_seconds() +
2812                                              PTLRPC_REQ_LONG_UNLINK;
2813
2814         /*
2815          * Nothing left to do.
2816          */
2817         if (!__ptlrpc_cli_wait_unlink(request, &discard))
2818                 RETURN(1);
2819
2820         LNetMDUnlink(request->rq_reply_md_h);
2821
2822         if (discard) /* Discard the request-out callback */
2823                 __LNetMDUnlink(request->rq_req_md_h, discard);
2824
2825         /*
2826          * Let's check it once again.
2827          */
2828         if (!ptlrpc_cli_wait_unlink(request))
2829                 RETURN(1);
2830
2831         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2832         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2833
2834         /*
2835          * Do not wait for unlink to finish.
2836          */
2837         if (async)
2838                 RETURN(0);
2839
2840         /*
2841          * We have to wait_event_idle_timeout() whatever the result, to get
2842          * a chance to run reply_in_callback(), and to make sure we've
2843          * unlinked before returning a req to the pool.
2844          */
2845         for (;;) {
2846                 wait_queue_head_t *wq = (request->rq_set) ?
2847                                         &request->rq_set->set_waitq :
2848                                         &request->rq_reply_waitq;
2849                 int seconds = PTLRPC_REQ_LONG_UNLINK;
2850                 /*
2851                  * Network access will complete in finite time but the HUGE
2852                  * timeout lets us CWARN for visibility of sluggish NALs
2853                  */
2854                 while (seconds > 0 &&
2855                        wait_event_idle_timeout(
2856                                *wq,
2857                                !ptlrpc_cli_wait_unlink(request),
2858                                cfs_time_seconds(1)) == 0)
2859                         seconds -= 1;
2860                 if (seconds > 0) {
2861                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2862                         RETURN(1);
2863                 }
2864
2865                 DEBUG_REQ(D_WARNING, request,
2866                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2867                           request->rq_receiving_reply,
2868                           request->rq_req_unlinked,
2869                           request->rq_reply_unlinked);
2870         }
2871         RETURN(0);
2872 }
2873
2874 static void ptlrpc_free_request(struct ptlrpc_request *req)
2875 {
2876         spin_lock(&req->rq_lock);
2877         req->rq_replay = 0;
2878         spin_unlock(&req->rq_lock);
2879
2880         if (req->rq_commit_cb)
2881                 req->rq_commit_cb(req);
2882         list_del_init(&req->rq_replay_list);
2883
2884         __ptlrpc_req_finished(req, 1);
2885 }
2886
2887 /**
2888  * The request is committed and dropped from the replay list of its import.
2889  */
2890 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2891 {
2892         struct obd_import *imp = req->rq_import;
2893
2894         spin_lock(&imp->imp_lock);
2895         if (list_empty(&req->rq_replay_list)) {
2896                 spin_unlock(&imp->imp_lock);
2897                 return;
2898         }
2899
2900         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2901                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2902                         imp->imp_replay_cursor = req->rq_replay_list.next;
2903                 ptlrpc_free_request(req);
2904         }
2905
2906         spin_unlock(&imp->imp_lock);
2907 }
2908 EXPORT_SYMBOL(ptlrpc_request_committed);
2909
2910 /**
2911  * Iterates through the replay_list on the import and prunes
2912  * all requests that have a transno smaller than last_committed for the
2913  * import and do not have rq_replay set.
2914  * Since requests are sorted in transno order, it stops at the first
2915  * transno bigger than last_committed.
2916  * The caller must hold imp->imp_lock.
2917  */
2918 void ptlrpc_free_committed(struct obd_import *imp)
2919 {
2920         struct ptlrpc_request *req, *saved;
2921         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2922         bool skip_committed_list = true;
2923         unsigned int replay_scanned = 0, replay_freed = 0;
2924         unsigned int commit_scanned = 0, commit_freed = 0;
2925         unsigned int debug_level = D_INFO;
2926         __u64 peer_committed_transno;
2927         int imp_generation;
2928         time64_t start, now;
2929
2930         ENTRY;
2931         LASSERT(imp != NULL);
2932         assert_spin_locked(&imp->imp_lock);
2933
2934         start = ktime_get_seconds();
2935         /* save these here, we can potentially drop imp_lock after checking */
2936         peer_committed_transno = imp->imp_peer_committed_transno;
2937         imp_generation = imp->imp_generation;
2938
2939         if (peer_committed_transno == imp->imp_last_transno_checked &&
2940             imp_generation == imp->imp_last_generation_checked) {
2941                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2942                        imp->imp_obd->obd_name, peer_committed_transno);
2943                 RETURN_EXIT;
2944         }
2945         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2946                imp->imp_obd->obd_name, peer_committed_transno, imp_generation);
2947
2948         if (imp_generation != imp->imp_last_generation_checked ||
2949             imp->imp_last_transno_checked == 0)
2950                 skip_committed_list = false;
2951         /* maybe drop imp_lock here, if another lock protected the lists */
2952
2953         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2954                                  rq_replay_list) {
2955                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2956                 LASSERT(req != last_req);
2957                 last_req = req;
2958
2959                 if (req->rq_transno == 0) {
2960                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2961                         LBUG();
2962                 }
2963
2964                 /* If other threads are waiting on imp_lock, stop processing
2965                  * in this thread. Another thread can finish remaining work.
2966                  * This may happen if there are huge numbers of open files
2967                  * that are closed suddenly or evicted, or if the server
2968                  * commit interval is very high vs. RPC rate.
2969                  */
2970                 if (++replay_scanned % 2048 == 0) {
2971                         now = ktime_get_seconds();
2972                         if (now > start + 5)
2973                                 debug_level = D_WARNING;
2974
2975                         if ((replay_freed > 128 && now > start + 3) &&
2976                             atomic_read(&imp->imp_waiting)) {
2977                                 if (debug_level == D_INFO)
2978                                         debug_level = D_RPCTRACE;
2979                                 break;
2980                         }
2981                 }
2982
2983                 if (req->rq_import_generation < imp_generation) {
2984                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2985                         GOTO(free_req, 0);
2986                 }
2987
2988                 /* not yet committed */
2989                 if (req->rq_transno > peer_committed_transno) {
2990                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2991                         break;
2992                 }
2993
2994                 if (req->rq_replay) {
2995                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2996                         list_move_tail(&req->rq_replay_list,
2997                                        &imp->imp_committed_list);
2998                         continue;
2999                 }
3000
3001                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
3002                           peer_committed_transno);
3003 free_req:
3004                 replay_freed++;
3005                 ptlrpc_free_request(req);
3006         }
3007
3008         if (skip_committed_list)
3009                 GOTO(out, 0);
3010
3011         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
3012                                  rq_replay_list) {
3013                 LASSERT(req->rq_transno != 0);
3014
3015                 /* If other threads are waiting on imp_lock, stop processing
3016                  * in this thread. Another thread can finish remaining work. */
3017                 if (++commit_scanned % 2048 == 0) {
3018                         now = ktime_get_seconds();
3019                         if (now > start + 6)
3020                                 debug_level = D_WARNING;
3021
3022                         if ((commit_freed > 128 && now > start + 4) &&
3023                             atomic_read(&imp->imp_waiting)) {
3024                                 if (debug_level == D_INFO)
3025                                         debug_level = D_RPCTRACE;
3026                                 break;
3027                         }
3028                 }
3029
3030                 if (req->rq_import_generation < imp_generation ||
3031                     !req->rq_replay) {
3032                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
3033                                   req->rq_import_generation <
3034                                   imp_generation ? "stale" : "closed");
3035
3036                         if (imp->imp_replay_cursor == &req->rq_replay_list)
3037                                 imp->imp_replay_cursor =
3038                                         req->rq_replay_list.next;
3039
3040                         commit_freed++;
3041                         ptlrpc_free_request(req);
3042                 }
3043         }
3044 out:
3045         /* if full lists processed without interruption, avoid next scan */
3046         if (debug_level == D_INFO) {
3047                 imp->imp_last_transno_checked = peer_committed_transno;
3048                 imp->imp_last_generation_checked = imp_generation;
3049         }
3050
3051         CDEBUG_LIMIT(debug_level,
3052                      "%s: %s: skip=%u replay=%u/%u committed=%u/%u\n",
3053                      imp->imp_obd->obd_name,
3054                      debug_level == D_INFO ? "normal" : "overloaded",
3055                      skip_committed_list, replay_freed, replay_scanned,
3056                      commit_freed, commit_scanned);
3057         EXIT;
3058 }
3059
3060 void ptlrpc_cleanup_client(struct obd_import *imp)
3061 {
3062         ENTRY;
3063         EXIT;
3064 }
3065
3066 /**
3067  * Schedule a previously sent request for resend.
3068  * For bulk requests we assign a new xid (to avoid problems with
3069  * lost replies and therefore several transfers landing in the same buffer
3070  * from different sending attempts).
3071  */
3072 void ptlrpc_resend_req(struct ptlrpc_request *req)
3073 {
3074         DEBUG_REQ(D_HA, req, "going to resend");
3075         spin_lock(&req->rq_lock);
3076
3077         /*
3078          * The request got a reply but is still linked to the import list.
3079          * Let ptlrpc_check_set() process it.
3080          */
3081         if (ptlrpc_client_replied(req)) {
3082                 spin_unlock(&req->rq_lock);
3083                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
3084                 return;
3085         }
3086
3087         req->rq_status = -EAGAIN;
3088
3089         req->rq_resend = 1;
3090         req->rq_net_err = 0;
3091         req->rq_timedout = 0;
3092
3093         ptlrpc_client_wake_req(req);
3094         spin_unlock(&req->rq_lock);
3095 }
3096
3097 /* XXX: this function and rq_status are currently unused */
3098 void ptlrpc_restart_req(struct ptlrpc_request *req)
3099 {
3100         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
3101         req->rq_status = -ERESTARTSYS;
3102
3103         spin_lock(&req->rq_lock);
3104         req->rq_restart = 1;
3105         req->rq_timedout = 0;
3106         ptlrpc_client_wake_req(req);
3107         spin_unlock(&req->rq_lock);
3108 }
3109
3110 /**
3111  * Grab an additional reference on request \a req
3112  */
3113 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
3114 {
3115         ENTRY;
3116         atomic_inc(&req->rq_refcount);
3117         RETURN(req);
3118 }
3119 EXPORT_SYMBOL(ptlrpc_request_addref);
3120
3121 /**
3122  * Add a request to import replay_list.
3123  * Must be called under imp_lock
3124  */
3125 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3126                                       struct obd_import *imp)
3127 {
3128         struct ptlrpc_request *iter;
3129
3130         assert_spin_locked(&imp->imp_lock);
3131
3132         if (req->rq_transno == 0) {
3133                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3134                 LBUG();
3135         }
3136
3137         /*
3138          * clear this for new requests that were resent as well
3139          * as resent replayed requests.
3140          */
3141         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3142
3143         /* don't re-add requests that have been replayed */
3144         if (!list_empty(&req->rq_replay_list))
3145                 return;
3146
3147         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3148
3149         spin_lock(&req->rq_lock);
3150         req->rq_resend = 0;
3151         spin_unlock(&req->rq_lock);
3152
3153         LASSERT(imp->imp_replayable);
3154         /* Balanced in ptlrpc_free_committed, usually. */
3155         ptlrpc_request_addref(req);
3156         list_for_each_entry_reverse(iter, &imp->imp_replay_list,
3157                                     rq_replay_list) {
3158                 /*
3159                  * We may have duplicate transnos if we create and then
3160                  * open a file, or for closes retained to match creating
3161                  * opens, so use req->rq_xid as a secondary key.
3162                  * (See bugs 684, 685, and 428.)
3163                  * XXX no longer needed, but all opens need transnos!
3164                  */
3165                 if (iter->rq_transno > req->rq_transno)
3166                         continue;
3167
3168                 if (iter->rq_transno == req->rq_transno) {
3169                         LASSERT(iter->rq_xid != req->rq_xid);
3170                         if (iter->rq_xid > req->rq_xid)
3171                                 continue;
3172                 }
3173
3174                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3175                 return;
3176         }
3177
3178         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3179 }
3180
3181 /**
3182  * Send a request and wait until it completes.
3183  * Returns the request processing status.
3184  */
3185 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3186 {
3187         struct ptlrpc_request_set *set;
3188         int rc;
3189
3190         ENTRY;
3191         LASSERT(req->rq_set == NULL);
3192         LASSERT(!req->rq_receiving_reply);
3193
3194         set = ptlrpc_prep_set();
3195         if (!set) {
3196                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3197                 RETURN(-ENOMEM);
3198         }
3199
3200         /* for distributed debugging */
3201         lustre_msg_set_status(req->rq_reqmsg, current->pid);
3202
3203         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3204         ptlrpc_request_addref(req);
3205         ptlrpc_set_add_req(set, req);
3206         rc = ptlrpc_set_wait(NULL, set);
3207         ptlrpc_set_destroy(set);
3208
3209         RETURN(rc);
3210 }
3211 EXPORT_SYMBOL(ptlrpc_queue_wait);
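
/*
 * Illustrative sketch (not part of the original source): the usual synchronous
 * call pattern built on ptlrpc_queue_wait().  RQF_OBD_PING/OBD_PING are used
 * only as a familiar example and error handling is kept minimal; real callers
 * pack their own request format.
 */
#if 0
static int example_sync_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int rc;

        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (req == NULL)
                return -ENOMEM;

        ptlrpc_request_set_replen(req);

        /* send the request and block until a reply or an error */
        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        return rc;
}
#endif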
3212
3213 /**
3214  * Callback used for reply processing of replayed requests.
3215  * In case of a successful reply, calls the registered request replay callback.
3216  * In case of an error, restarts the replay process.
3217  */
3218 static int ptlrpc_replay_interpret(const struct lu_env *env,
3219                                    struct ptlrpc_request *req,
3220                                    void *args, int rc)
3221 {
3222         struct ptlrpc_replay_async_args *aa = args;
3223         struct obd_import *imp = req->rq_import;
3224
3225         ENTRY;
3226         atomic_dec(&imp->imp_replay_inflight);
3227
3228         /*
3229          * Note: if it is a bulk replay (MDS-MDS replay), then even if the
3230          * server got the request but the bulk transfer timed out, let's
3231          * replay the bulk req again
3232          */
3233         if (!ptlrpc_client_replied(req) ||
3234             (req->rq_bulk &&
3235              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3236                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3237                 GOTO(out, rc = -ETIMEDOUT);
3238         }
3239
3240         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3241             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3242             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3243                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3244
3245         /** VBR: check version failure */
3246         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3247                 /** replay failed due to a version mismatch */
3248                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3249                 spin_lock(&imp->imp_lock);
3250                 imp->imp_vbr_failed = 1;
3251                 spin_unlock(&imp->imp_lock);
3252                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3253         } else {
3254                 /** The transno had better not change over replay. */
3255                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3256                          lustre_msg_get_transno(req->rq_repmsg) ||
3257                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3258                          "%#llx/%#llx\n",
3259                          lustre_msg_get_transno(req->rq_reqmsg),
3260                          lustre_msg_get_transno(req->rq_repmsg));
3261         }
3262
3263         spin_lock(&imp->imp_lock);
3264         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3265         spin_unlock(&imp->imp_lock);
3266         LASSERT(imp->imp_last_replay_transno);
3267
3268         /* transaction number shouldn't be bigger than the latest replayed */
3269         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3270                 DEBUG_REQ(D_ERROR, req,
3271                           "Reported transno=%llu is bigger than replayed=%llu",
3272                           req->rq_transno,
3273                           lustre_msg_get_transno(req->rq_reqmsg));
3274                 GOTO(out, rc = -EINVAL);
3275         }
3276
3277         DEBUG_REQ(D_HA, req, "got reply");
3278
3279         /* let the callback do fixups, possibly in the request itself */
3280         if (req->rq_replay_cb)
3281                 req->rq_replay_cb(req);
3282
3283         if (ptlrpc_client_replied(req) &&
3284             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3285                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3286                           lustre_msg_get_status(req->rq_repmsg),
3287                           aa->praa_old_status);
3288
3289                 /*
3290                  * Note: If the replay fails for MDT-MDT recovery, let's
3291                  * abort all of the following requests in the replay
3292                  * and sending list, because MDT-MDT update requests
3293                  * are dependent on each other, see LU-7039
3294                  */
3295                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3296                         struct ptlrpc_request *free_req;
3297                         struct ptlrpc_request *tmp;
3298
3299                         spin_lock(&imp->imp_lock);
3300                         list_for_each_entry_safe(free_req, tmp,
3301                                                  &imp->imp_replay_list,
3302                                                  rq_replay_list) {
3303                                 ptlrpc_free_request(free_req);
3304                         }
3305
3306                         list_for_each_entry_safe(free_req, tmp,
3307                                                  &imp->imp_committed_list,
3308                                                  rq_replay_list) {
3309                                 ptlrpc_free_request(free_req);
3310                         }
3311
3312                         list_for_each_entry_safe(free_req, tmp,
3313                                                  &imp->imp_delayed_list,
3314                                                  rq_list) {
3315                                 spin_lock(&free_req->rq_lock);
3316                                 free_req->rq_err = 1;
3317                                 free_req->rq_status = -EIO;
3318                                 ptlrpc_client_wake_req(free_req);
3319                                 spin_unlock(&free_req->rq_lock);
3320                         }
3321
3322                         list_for_each_entry_safe(free_req, tmp,
3323                                                  &imp->imp_sending_list,
3324                                                  rq_list) {
3325                                 spin_lock(&free_req->rq_lock);
3326                                 free_req->rq_err = 1;
3327                                 free_req->rq_status = -EIO;
3328                                 ptlrpc_client_wake_req(free_req);
3329                                 spin_unlock(&free_req->rq_lock);
3330                         }
3331                         spin_unlock(&imp->imp_lock);
3332                 }
3333         } else {
3334                 /* Put it back for re-replay. */
3335                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3336         }
3337
3338         /*
3339          * Errors during replay can set the transno to 0, but
3340          * imp_last_replay_transno shouldn't be set to 0 anyway
3341          */
3342         if (req->rq_transno == 0)
3343                 CERROR("Transno is 0 during replay!\n");
3344
3345         /* continue with recovery */
3346         rc = ptlrpc_import_recovery_state_machine(imp);
3347  out:
3348         req->rq_send_state = aa->praa_old_state;
3349
3350         if (rc != 0)
3351                 /* this replay failed, so restart recovery */
3352                 ptlrpc_connect_import(imp);
3353
3354         RETURN(rc);
3355 }
3356
3357 /**
3358  * Prepares and queues a request for replay.
3359  * Adds it to the ptlrpcd queue for actual sending.
3360  * Returns 0 on success.
3361  */
3362 int ptlrpc_replay_req(struct ptlrpc_request *req)
3363 {
3364         struct ptlrpc_replay_async_args *aa;
3365
3366         ENTRY;
3367
3368         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3369
3370         aa = ptlrpc_req_async_args(aa, req);
3371         memset(aa, 0, sizeof(*aa));
3372
3373         /* Prepare request to be resent with ptlrpcd */
3374         aa->praa_old_state = req->rq_send_state;
3375         req->rq_send_state = LUSTRE_IMP_REPLAY;
3376         req->rq_phase = RQ_PHASE_NEW;
3377         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3378         if (req->rq_repmsg)
3379                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3380         req->rq_status = 0;
3381         req->rq_interpret_reply = ptlrpc_replay_interpret;
3382         /* Readjust the timeout for current conditions */
3383         ptlrpc_at_set_req_timeout(req);
3384
3385         /* Tell server net_latency to calculate how long to wait for reply. */
3386         lustre_msg_set_service_timeout(req->rq_reqmsg,
3387                                        ptlrpc_at_get_net_latency(req));
3388         DEBUG_REQ(D_HA, req, "REPLAY");
3389
3390         atomic_inc(&req->rq_import->imp_replay_inflight);
3391         spin_lock(&req->rq_lock);
3392         req->rq_early_free_repbuf = 0;
3393         spin_unlock(&req->rq_lock);
3394         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3395
3396         ptlrpcd_add_req(req);
3397         RETURN(0);
3398 }
3399
3400 /**
3401  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
3402  */
3403 void ptlrpc_abort_inflight(struct obd_import *imp)
3404 {
3405         struct ptlrpc_request *req;
3406         ENTRY;
3407
3408         /*
3409          * Make sure that no new requests get processed for this import.
3410          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3411          * this flag and then putting requests on sending_list or delayed_list.
3412          */
3413         assert_spin_locked(&imp->imp_lock);
3414
3415         /*
3416          * XXX locking?  Maybe we should remove each request with the list
3417          * locked?  Also, how do we know if the requests on the list are
3418          * being freed at this time?
3419          */
3420         list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
3421                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3422
3423                 spin_lock(&req->rq_lock);
3424                 if (req->rq_import_generation < imp->imp_generation) {
3425                         req->rq_err = 1;
3426                         req->rq_status = -EIO;
3427                         ptlrpc_client_wake_req(req);
3428                 }
3429                 spin_unlock(&req->rq_lock);
3430         }
3431
3432         list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
3433                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3434
3435                 spin_lock(&req->rq_lock);
3436                 if (req->rq_import_generation < imp->imp_generation) {
3437                         req->rq_err = 1;
3438                         req->rq_status = -EIO;
3439                         ptlrpc_client_wake_req(req);
3440                 }
3441                 spin_unlock(&req->rq_lock);
3442         }
3443
3444         /*
3445          * Last chance to free reqs left on the replay list, but we
3446          * will still leak reqs that haven't committed.
3447          */
3448         if (imp->imp_replayable)
3449                 ptlrpc_free_committed(imp);
3450
3451         EXIT;
3452 }
3453
3454 /**
3455  * Abort all uncompleted requests in request set \a set
3456  */
3457 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3458 {
3459         struct ptlrpc_request *req;
3460
3461         LASSERT(set != NULL);
3462
3463         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
3464                 spin_lock(&req->rq_lock);
3465                 if (req->rq_phase != RQ_PHASE_RPC) {
3466                         spin_unlock(&req->rq_lock);
3467                         continue;
3468                 }
3469
3470                 req->rq_err = 1;
3471                 req->rq_status = -EINTR;
3472                 ptlrpc_client_wake_req(req);
3473                 spin_unlock(&req->rq_lock);
3474         }
3475 }
3476
3477 /**
3478  * Initialize the XID for the node.  This is common among all requests on
3479  * this node, and only requires the property that it is monotonically
3480  * increasing.  It does not need to be sequential.  Since this is also used
3481  * as the RDMA match bits, it is important that a single client NOT have
3482  * the same match bits for two different in-flight requests, hence we do
3483  * NOT want to have an XID per target or similar.
3484  *
3485  * To avoid an unlikely collision between match bits after a client reboot
3486  * (which would deliver old data into the wrong RDMA buffer) initialize
3487  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3488  * If the time is clearly incorrect, we instead use a 62-bit random number.
3489  * In the worst case the random number will overflow 1M RPCs per second in
3490  * 9133 years, or permutations thereof.
3491  */
3492 #define YEAR_2004 (1ULL << 30)
3493 void ptlrpc_init_xid(void)
3494 {
3495         time64_t now = ktime_get_real_seconds();
3496         u64 xid;
3497
3498         if (now < YEAR_2004) {
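                /* clock looks bogus: fall back to a 62-bit random value with the top bit set */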
3499                 get_random_bytes(&xid, sizeof(xid));
3500                 xid >>= 2;
3501                 xid |= (1ULL << 61);
3502         } else {
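                /* time-based XID: the << 20 shift allows ~1M (2^20) XIDs per second */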
3503                 xid = (u64)now << 20;
3504         }
3505
3506         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3507         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3508                      0);
3509         xid &= PTLRPC_BULK_OPS_MASK;
3510         atomic64_set(&ptlrpc_last_xid, xid);
3511 }
3512
3513 /**
3514  * Increases the xid and returns the resulting new value to the caller.
3515  *
3516  * Multi-bulk BRW RPCs consume multiple XIDs, one for each bulk transfer,
3517  * starting at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The
3518  * BRW RPC itself uses the last bulk xid needed, so the server can determine
3519  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3520  * xid must align to a power-of-two value.
3521  *
3522  * This is assumed to be true due to the initial ptlrpc_last_xid
3523  * value also being initialized to a power-of-two value. LU-1431
3524  */
3525 __u64 ptlrpc_next_xid(void)
3526 {
3527         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3528 }
3529
3530 /**
3531  * If the request has a newly allocated XID (new request or EINPROGRESS
3532  * resend), use this XID as the bulk matchbits; otherwise allocate new
3533  * matchbits for the request to ensure the previous bulk fails, avoiding
3534  * problems with lost replies and therefore several transfers landing in the
3535  * same buffer from different sending attempts.
3536  * This also avoids a previous reply landing in a different sending attempt.
3537  */
3538 void ptlrpc_set_mbits(struct ptlrpc_request *req)
3539 {
3540         int md_count = req->rq_bulk ? req->rq_bulk->bd_md_count : 1;
3541
3542         /*
3543          * Generate new matchbits for all resend requests, including
3544          * resend replay.
3545          */
3546         if (req->rq_resend) {
3547                 __u64 old_mbits = req->rq_mbits;
3548
3549                 /*
3550                  * First time resend on -EINPROGRESS will generate new xid,
3551                  * The first resend on -EINPROGRESS will generate a new xid,
3552                  * so we could actually use the rq_xid as rq_mbits in that case;
3553                  * however, it's a bit hard to distinguish such a resend from a
3554                  * 'resend of the -EINPROGRESS resend'. To keep it simple,
3555                  */
3556                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3557                                  BULK_MBITS)) {
3558                         req->rq_mbits = ptlrpc_next_xid();
3559                 } else {
3560                         /*
3561                          * Old version transfers rq_xid to peer as
3562                          * matchbits.
3563                          */
3564                         spin_lock(&req->rq_import->imp_lock);
3565                         list_del_init(&req->rq_unreplied_list);
3566                         ptlrpc_assign_next_xid_nolock(req);
3567                         spin_unlock(&req->rq_import->imp_lock);
3568                         req->rq_mbits = req->rq_xid;
3569                 }
3570                 CDEBUG(D_HA, "resend with new mbits old x%llu new x%llu\n",
3571                        old_mbits, req->rq_mbits);
3572         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3573                 /* Request being sent first time, use xid as matchbits. */
3574                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3575                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3576                                  BULK_MBITS) ||
3577                     req->rq_mbits == 0) {
3578                         req->rq_mbits = req->rq_xid;
3579                         req->rq_mbits -= md_count - 1;
3580                 }
3581         } else {
3582                 /*
3583                  * Replay request, xid and matchbits have already been
3584                  * correctly assigned.
3585                  */
3586                 return;
3587         }
3588
3589         /*
3590          * For multi-bulk RPCs, rq_mbits is the last mbits needed for the bulks
3591          * so that the server can infer the number of bulks that were prepared,
3592          * see LU-1431
3593          */
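        /* e.g. md_count == 4 starting at mbits X: the bulks use X .. X+3 */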
3594         req->rq_mbits += md_count - 1;
3595
3596         /*
3597          * Set rq_xid to rq_mbits to indicate the final bulk for an old
3598          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3599          *
3600          * It's ok to set rq_xid directly here, since this xid bump
3601          * won't affect the request's position in the unreplied list.
3602          */
3603         if (!OCD_HAS_FLAG(&req->rq_import->imp_connect_data, BULK_MBITS))
3604                 req->rq_xid = req->rq_mbits;
3605 }
3606
3607 /**
3608  * Get a glimpse at what the next xid value might be.
3609  * Returns the possible next xid.
3610  */
3611 __u64 ptlrpc_sample_next_xid(void)
3612 {
3613         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3614 }
3615 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3616
3617 /**
3618  * Functions for operating ptlrpc workers.
3619  *
3620  * A ptlrpc work is a function which runs inside the ptlrpc context.
3621  * The callback must not sleep, otherwise it will block that ptlrpcd thread.
3622  *
3623  * 1. after a work is created, it can be used many times, that is:
3624  *         handler = ptlrpcd_alloc_work();
3625  *         ptlrpcd_queue_work();
3626  *
3627  *    queue it again when necessary:
3628  *         ptlrpcd_queue_work();
3629  *         ptlrpcd_destroy_work();
3630  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3631  *    but the work will only be queued once at any time. Also, as its name
3632  *    implies, there may be a delay before it actually runs in a ptlrpcd thread.
3633  */
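
/*
 * Illustrative sketch (not part of the original source): a hypothetical caller
 * using the worker API described above.  example_cb() and example_use_work()
 * are made-up names; a real user supplies its own non-sleeping callback.
 */
#if 0
static int example_cb(const struct lu_env *env, void *data)
{
        /* runs in ptlrpcd context, so it must not sleep */
        CDEBUG(D_INFO, "work fired, data %p\n", data);
        return 0;
}

static int example_use_work(struct obd_import *imp, void *data)
{
        void *handler;

        handler = ptlrpcd_alloc_work(imp, example_cb, data);
        if (IS_ERR(handler))
                return PTR_ERR(handler);

        /* can be queued repeatedly; only one instance is pending at a time */
        ptlrpcd_queue_work(handler);
        ptlrpcd_queue_work(handler);

        /* drop the allocation reference once the work is no longer needed */
        ptlrpcd_destroy_work(handler);
        return 0;
}
#endif
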
3634 struct ptlrpc_work_async_args {
3635         int (*cb)(const struct lu_env *, void *);
3636         void *cbdata;
3637 };
3638
3639 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3640 {
3641         /* re-initialize the req */
3642         req->rq_timeout         = obd_timeout;
3643         req->rq_sent            = ktime_get_real_seconds();
3644         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3645         req->rq_phase           = RQ_PHASE_INTERPRET;
3646         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3647         req->rq_xid             = ptlrpc_next_xid();
3648         req->rq_import_generation = req->rq_import->imp_generation;
3649
3650         ptlrpcd_add_req(req);
3651 }
3652
3653 static int work_interpreter(const struct lu_env *env,
3654                             struct ptlrpc_request *req, void *args, int rc)
3655 {
3656         struct ptlrpc_work_async_args *arg = args;
3657
3658         LASSERT(ptlrpcd_check_work(req));
3659         LASSERT(arg->cb != NULL);
3660
3661         rc = arg->cb(env, arg->cbdata);
3662
3663         list_del_init(&req->rq_set_chain);
3664         req->rq_set = NULL;
3665
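        /* if ptlrpcd_queue_work() raced with this run, reset the refcount and re-queue */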
3666         if (atomic_dec_return(&req->rq_refcount) > 1) {
3667                 atomic_set(&req->rq_refcount, 2);
3668                 ptlrpcd_add_work_req(req);
3669         }
3670         return rc;
3671 }
3672
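/* the address of this variable serves as a sentinel rc_fmt value marking work requests */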
3673 static int worker_format;
3674
3675 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3676 {
3677         return req->rq_pill.rc_fmt == (void *)&worker_format;
3678 }
3679
3680 /**
3681  * Create a work for ptlrpc.
3682  */
3683 void *ptlrpcd_alloc_work(struct obd_import *imp,
3684                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3685 {
3686         struct ptlrpc_request *req = NULL;
3687         struct ptlrpc_work_async_args *args;
3688
3689         ENTRY;
3690         might_sleep();
3691
3692         if (!cb)
3693                 RETURN(ERR_PTR(-EINVAL));
3694
3695         /* copy some code from deprecated fakereq. */
3696         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3697         if (!req) {
3698                 CERROR("ptlrpc: run out of memory!\n");
3699                 RETURN(ERR_PTR(-ENOMEM));
3700         }
3701
3702         ptlrpc_cli_req_init(req);
3703
3704         req->rq_send_state = LUSTRE_IMP_FULL;
3705         req->rq_type = PTL_RPC_MSG_REQUEST;
3706         req->rq_import = class_import_get(imp);
3707         req->rq_interpret_reply = work_interpreter;
3708         /* don't want reply */
3709         req->rq_no_delay = req->rq_no_resend = 1;
3710         req->rq_pill.rc_fmt = (void *)&worker_format;
3711
3712         args = ptlrpc_req_async_args(args, req);
3713         args->cb     = cb;
3714         args->cbdata = cbdata;
3715
3716         RETURN(req);
3717 }
3718 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3719
3720 void ptlrpcd_destroy_work(void *handler)
3721 {
3722         struct ptlrpc_request *req = handler;
3723
3724         if (req)
3725                 ptlrpc_req_finished(req);
3726 }
3727 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3728
3729 int ptlrpcd_queue_work(void *handler)
3730 {
3731         struct ptlrpc_request *req = handler;
3732
3733         /*
3734          * Check if the req is already being queued.
3735          *
3736          * Here comes a trick: ptlrpc lacks a reliable way of checking if a
3737          * req is being processed, so I have to use the refcount of the req
3738          * for this purpose. This is okay because the caller should use this
3739          * req as opaque data. - Jinshan
3740          */
3741         LASSERT(atomic_read(&req->rq_refcount) > 0);
3742         if (atomic_inc_return(&req->rq_refcount) == 2)
3743                 ptlrpcd_add_work_req(req);
3744         return 0;
3745 }
3746 EXPORT_SYMBOL(ptlrpcd_queue_work);