LU-17704 revert: "LU-17379 ptlrpc: fix check for callback discard"
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 /** Implementation of client-side PortalRPC interfaces */
33
34 #define DEBUG_SUBSYSTEM S_RPC
35
36 #include <linux/delay.h>
37 #include <linux/random.h>
38
39 #include <lnet/lib-lnet.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(desc->bd_vec[i].bv_page);
69 }
70
71 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
72                                        void *frag, int len)
73 {
74         unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
75
76         ENTRY;
77         while (len > 0) {
78                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
79                                      len);
80                 struct page *p;
81
82                 if (!is_vmalloc_addr(frag))
83                         p = virt_to_page((unsigned long)frag);
84                 else
85                         p = vmalloc_to_page(frag);
86                 ptlrpc_prep_bulk_page_nopin(desc, p, offset, page_len);
87                 offset = 0;
88                 len -= page_len;
89                 frag += page_len;
90         }
91
92         RETURN(desc->bd_nob);
93 }
94
95 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
96         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
97         .release_frags  = ptlrpc_release_bulk_page_pin,
98 };
99 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
100
101 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
102         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
103         .release_frags  = ptlrpc_release_bulk_noop,
104         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
105 };
106 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
107
108 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
109 static int ptlrpcd_check_work(struct ptlrpc_request *req);
110 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
111
112 /**
113  * Initialize the passed-in client structure \a cl.
114  */
115 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
116                         struct ptlrpc_client *cl)
117 {
118         cl->cli_request_portal = req_portal;
119         cl->cli_reply_portal   = rep_portal;
120         cl->cli_name           = name;
121 }
122 EXPORT_SYMBOL(ptlrpc_init_client);
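/*
 * Illustrative usage sketch, not part of the original file: an OBD client
 * typically calls ptlrpc_init_client() once at setup time to bind its
 * request/reply portals.  The portal constants and the obd_ldlm_client field
 * below are assumptions based on how other client code in the tree is
 * commonly wired up; the snippet relies on the headers included above.
 */
#if 0	/* example only */
static void example_client_setup(struct obd_device *obd)
{
	/* e.g. an OSC-like client binding the OST request portal and its
	 * own reply portal
	 */
	ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
			   "ost_client", &obd->obd_ldlm_client);
}
#endif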
123
124 /**
125  * Return PortalRPC connection for remote uuid \a uuid
126  */
127 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
128                                                     u32 refnet)
129 {
130         struct ptlrpc_connection *c;
131         struct lnet_nid self;
132         struct lnet_processid peer;
133         int err;
134
135         /*
136          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
137          * before accessing its values.
138          */
139         err = ptlrpc_uuid_to_peer(uuid, &peer, &self, refnet);
140         if (err != 0) {
141                 CNETERR("cannot find peer %s!\n", uuid->uuid);
142                 return NULL;
143         }
144
145         c = ptlrpc_connection_get(&peer, &self, uuid);
146         if (c) {
147                 memcpy(c->c_remote_uuid.uuid,
148                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
149         }
150
151         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
152
153         return c;
154 }
155
156 /**
157  * Allocate and initialize new bulk descriptor on the sender.
158  * Returns pointer to the descriptor or NULL on error.
159  */
160 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
161                                          unsigned int max_brw,
162                                          enum ptlrpc_bulk_op_type type,
163                                          unsigned int portal,
164                                          const struct ptlrpc_bulk_frag_ops *ops)
165 {
166         struct ptlrpc_bulk_desc *desc;
167         int i;
168
169         LASSERT(ops->add_kiov_frag != NULL);
170
171         if (max_brw > PTLRPC_BULK_OPS_COUNT)
172                 RETURN(NULL);
173
174         if (nfrags > LNET_MAX_IOV * max_brw)
175                 RETURN(NULL);
176
177         OBD_ALLOC_PTR(desc);
178         if (!desc)
179                 return NULL;
180
181         OBD_ALLOC_LARGE(desc->bd_vec,
182                         nfrags * sizeof(*desc->bd_vec));
183         if (!desc->bd_vec)
184                 goto out;
185
186         spin_lock_init(&desc->bd_lock);
187         init_waitqueue_head(&desc->bd_waitq);
188         desc->bd_max_iov = nfrags;
189         desc->bd_iov_count = 0;
190         desc->bd_portal = portal;
191         desc->bd_type = type;
192         desc->bd_md_count = 0;
193         desc->bd_nob_last = LNET_MTU;
194         desc->bd_frag_ops = ops;
195         LASSERT(max_brw > 0);
196         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
197         /*
198          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
199          * node. Negotiated ocd_brw_size will always be <= this number.
200          */
201         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
202                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
203
204         return desc;
205 out:
206         OBD_FREE_PTR(desc);
207         return NULL;
208 }
209
210 /**
211  * Prepare bulk descriptor for specified outgoing request \a req that
212  * can fit \a nfrags * pages. \a type is the bulk type. \a portal is where
213  * the bulk is to be sent. Used on the client side.
214  * Returns pointer to newly allocated and initialized bulk descriptor or
215  * NULL on error.
216  */
217 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
218                                               unsigned int nfrags,
219                                               unsigned int max_brw,
220                                               unsigned int type,
221                                               unsigned int portal,
222                                               const struct ptlrpc_bulk_frag_ops
223                                                 *ops)
224 {
225         struct obd_import *imp = req->rq_import;
226         struct ptlrpc_bulk_desc *desc;
227
228         ENTRY;
229         LASSERT(ptlrpc_is_bulk_op_passive(type));
230
231         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
232         if (!desc)
233                 RETURN(NULL);
234
235         desc->bd_import = class_import_get(imp);
236         desc->bd_req = req;
237
238         desc->bd_cbid.cbid_fn  = client_bulk_callback;
239         desc->bd_cbid.cbid_arg = desc;
240
241         /* This makes req own desc; desc is freed when req itself is freed */
242         req->rq_bulk = desc;
243
244         return desc;
245 }
246 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
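/*
 * Illustrative sketch, not part of the original file: a client-side bulk
 * read would allocate a passive "PUT sink" descriptor for its request and
 * attach its pages through the descriptor's frag ops.  The bulk type and
 * portal constants and the page array are assumptions for illustration.
 */
#if 0	/* example only */
static int example_prep_bulk_read(struct ptlrpc_request *req,
				  struct page **pages, int npages)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, npages, 1, PTLRPC_BULK_PUT_SINK,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return -ENOMEM;

	/* each full page becomes one kiov fragment of the bulk transfer */
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);

	return 0;
}
#endif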
247
248 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
249                              struct page *page, int pageoffset, int len,
250                              int pin)
251 {
252         struct bio_vec *kiov;
253
254         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
255         LASSERT(page != NULL);
256         LASSERT(pageoffset >= 0);
257         LASSERT(len > 0);
258         LASSERT(pageoffset + len <= PAGE_SIZE);
259
260         kiov = &desc->bd_vec[desc->bd_iov_count];
261
262         if (((desc->bd_iov_count % LNET_MAX_IOV) == 0) ||
263              ((desc->bd_nob_last + len) > LNET_MTU)) {
264                 desc->bd_mds_off[desc->bd_md_count] = desc->bd_iov_count;
265                 desc->bd_md_count++;
266                 desc->bd_nob_last = 0;
267                 LASSERT(desc->bd_md_count <= PTLRPC_BULK_OPS_COUNT);
268         }
269
270         desc->bd_nob_last += len;
271         desc->bd_nob += len;
272
273         if (pin)
274                 get_page(page);
275
276         kiov->bv_page = page;
277         kiov->bv_offset = pageoffset;
278         kiov->bv_len = len;
279
280         desc->bd_iov_count++;
281 }
282 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
283
284 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
285 {
286         ENTRY;
287
288         LASSERT(desc != NULL);
289         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
290         LASSERT(desc->bd_refs == 0);         /* network hands off */
291         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
292         LASSERT(desc->bd_frag_ops != NULL);
293
294         obd_pool_put_desc_pages(desc);
295
296         if (desc->bd_export)
297                 class_export_put(desc->bd_export);
298         else
299                 class_import_put(desc->bd_import);
300
301         if (desc->bd_frag_ops->release_frags != NULL)
302                 desc->bd_frag_ops->release_frags(desc);
303
304         OBD_FREE_LARGE(desc->bd_vec,
305                        desc->bd_max_iov * sizeof(*desc->bd_vec));
306         OBD_FREE_PTR(desc);
307         EXIT;
308 }
309 EXPORT_SYMBOL(ptlrpc_free_bulk);
310
311 /**
312  * Set server timelimit for this req, i.e. how long are we willing to wait
313  * for reply before timing out this request.
314  */
315 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
316 {
317         struct obd_device *obd;
318
319         LASSERT(req->rq_import);
320         obd = req->rq_import->imp_obd;
321
322         if (obd_at_off(obd)) {
323                 /* non-AT settings */
324                 /**
325                  * \a imp_server_timeout means this is a reverse import and
326                  * we send (currently only) ASTs to the client and cannot afford
327                  * to wait too long for the reply, otherwise the other client
328                  * (because of which we are sending this request) would
329                  * time out waiting for us
330                  */
331                 req->rq_timeout = req->rq_import->imp_server_timeout ?
332                                   obd_timeout / 2 : obd_timeout;
333         } else {
334                 struct imp_at *at = &req->rq_import->imp_at;
335                 timeout_t serv_est;
336                 int idx;
337
338                 idx = import_at_get_index(req->rq_import,
339                                           req->rq_request_portal);
340                 serv_est = obd_at_get(obd, &at->iat_service_estimate[idx]);
341                 /*
342                  * Currently a 32 bit value is sent over the
343                  * wire for rq_timeout so please don't change this
344                  * to time64_t. The work for LU-1158 will in time
345                  * replace rq_timeout with a 64 bit nanosecond value
346                  */
347                 req->rq_timeout = at_est2timeout(serv_est);
348         }
349         /*
350          * We could get even fancier here, using history to predict increased
351          * loading...
352          *
353          * Let the server know what this RPC timeout is by putting it in the
354          * reqmsg
355          */
356         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
357 }
358 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
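/*
 * Worked example (illustrative, not part of the original file): with AT
 * enabled and a current service estimate of 20s for this portal,
 * at_est2timeout() gives roughly 20 * 125% + 5 = 30s (see the comment in
 * after_reply() below), so rq_timeout becomes 30 and that value is
 * advertised to the server via lustre_msg_set_timeout().  With AT disabled,
 * rq_timeout is simply obd_timeout, or obd_timeout / 2 on a reverse import.
 */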
359
360 /* Adjust max service estimate based on server value */
361 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
362                                   timeout_t serv_est)
363 {
364         int idx;
365         timeout_t oldse;
366         struct imp_at *at;
367         struct obd_device *obd;
368
369         LASSERT(req->rq_import);
370         obd = req->rq_import->imp_obd;
371         at = &req->rq_import->imp_at;
372
373         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
374         /*
375          * max service estimates are tracked on the server side,
376          * so just keep minimal history here
377          */
378         oldse = obd_at_measure(obd, &at->iat_service_estimate[idx], serv_est);
379         if (oldse != 0) {
380                 unsigned int at_est = obd_at_get(obd,
381                                                 &at->iat_service_estimate[idx]);
382                 CDEBUG(D_ADAPTTO,
383                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
384                        req->rq_import->imp_obd->obd_name,
385                        req->rq_request_portal,
386                        oldse, at_est);
387         }
388 }
389
390 /**
391  * Returns expected network latency per remote node (seconds).
392  *
393  * \param[in] req       ptlrpc request
394  *
395  * \retval      0 if AT (Adaptive Timeout) is off
396  * \retval      >0 (iat_net_latency) latency per node
397  */
398 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
399 {
400         struct obd_device *obd = req->rq_import->imp_obd;
401
402         return obd_at_off(obd) ?
403                0 : obd_at_get(obd, &req->rq_import->imp_at.iat_net_latency);
404 }
405
406 /* Adjust expected network latency */
407 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
408                                timeout_t service_timeout)
409 {
410         time64_t now = ktime_get_real_seconds();
411         struct imp_at *at;
412         timeout_t oldnl;
413         timeout_t nl;
414         struct obd_device *obd;
415
416         LASSERT(req->rq_import);
417         obd = req->rq_import->imp_obd;
418
419         if (service_timeout > now - req->rq_sent + 3) {
420                 /*
421                  * b=16408. However, this can also happen if an early reply
422                  * is lost and the client RPC expires and is resent: the early
423                  * reply or the reply of the original RPC can still fit in the
424                  * reply buffer of the resent RPC, so the client measures time
425                  * from the resend time while the server reports the service
426                  * time of the original RPC.
427                  */
428                 CDEBUG_LIMIT((lustre_msg_get_flags(req->rq_reqmsg) &
429                               MSG_RESENT) ?  D_ADAPTTO : D_WARNING,
430                              "Reported service time %u > total measured time %lld\n",
431                              service_timeout, now - req->rq_sent);
432                 return;
433         }
434
435         /* Network latency is total time less server processing time,
436          * plus 1 to account for service time (st) rounding
437          */
438         nl = max_t(timeout_t, now - req->rq_sent - service_timeout, 0) + 1;
439         at = &req->rq_import->imp_at;
440
441         oldnl = obd_at_measure(obd, &at->iat_net_latency, nl);
442         if (oldnl != 0) {
443                 timeout_t timeout = obd_at_get(obd, &at->iat_net_latency);
444
445                 CDEBUG(D_ADAPTTO,
446                        "The network latency for %s (nid %s) has changed from %d to %d\n",
447                        req->rq_import->imp_obd->obd_name,
448                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
449                        oldnl, timeout);
450         }
451 }
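/*
 * Worked example (illustrative, not part of the original file): if the
 * reply arrives 5s after rq_sent and the server reports a 3s service time,
 * the measured network latency is max(5 - 3, 0) + 1 = 3s, which is folded
 * into iat_net_latency via obd_at_measure().  Had the reported service time
 * exceeded the measured 5s plus the 3s of slack checked above, the sample
 * would have been discarded as in the lost-early-reply/resend case.
 */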
452
453 static int unpack_reply(struct ptlrpc_request *req)
454 {
455         int rc;
456
457         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
458                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
459                 if (rc) {
460                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
461                                   rc);
462                         return -EPROTO;
463                 }
464         }
465
466         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
467         if (rc) {
468                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
469                           rc);
470                 return -EPROTO;
471         }
472         return 0;
473 }
474
475 /**
476  * Handle an early reply message, called with the rq_lock held.
477  * If anything goes wrong just ignore it - same as if it never happened
478  */
479 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
480 __must_hold(&req->rq_lock)
481 {
482         struct ptlrpc_request *early_req;
483         timeout_t service_timeout;
484         time64_t olddl;
485         int rc;
486
487         ENTRY;
488         req->rq_early = 0;
489         spin_unlock(&req->rq_lock);
490
491         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
492         if (rc) {
493                 spin_lock(&req->rq_lock);
494                 RETURN(rc);
495         }
496
497         rc = unpack_reply(early_req);
498         if (rc != 0) {
499                 sptlrpc_cli_finish_early_reply(early_req);
500                 spin_lock(&req->rq_lock);
501                 RETURN(rc);
502         }
503
504         /*
505          * Use the new timeout value just to adjust the local value for this
506          * request; don't include it in at_history. It is not yet clear why
507          * the service time increased and whether it should be counted or
508          * skipped (e.g. this could be a recovery case or some error on the
509          * server); the real reply will add all new data if it is worth adding.
510          */
511         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
512         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
513
514         /* Network latency can be adjusted, it is pure network delays */
515         service_timeout = lustre_msg_get_service_timeout(early_req->rq_repmsg);
516         ptlrpc_at_adj_net_latency(req, service_timeout);
517
518         sptlrpc_cli_finish_early_reply(early_req);
519
520         spin_lock(&req->rq_lock);
521         olddl = req->rq_deadline;
522         /*
523          * The server assumes it now has rq_timeout from when the request
524          * arrived, so the client should give it at least that long.
525          * Since we don't know the arrival time, we'll use the original
526          * sent time.
527          */
528         req->rq_deadline = req->rq_sent + req->rq_timeout +
529                            ptlrpc_at_get_net_latency(req);
530
531         /* The below message is checked in replay-single.sh test_65{a,b} */
532         /* The below message is checked in sanity-{gss,krb5} test_8 */
533         DEBUG_REQ(D_ADAPTTO, req,
534                   "Early reply #%d, new deadline in %llds (%llds)",
535                   req->rq_early_count,
536                   req->rq_deadline - ktime_get_real_seconds(),
537                   req->rq_deadline - olddl);
538
539         RETURN(rc);
540 }
541
542 static struct kmem_cache *request_cache;
543
544 int ptlrpc_request_cache_init(void)
545 {
546         request_cache = kmem_cache_create("ptlrpc_cache",
547                                           sizeof(struct ptlrpc_request),
548                                           0, SLAB_HWCACHE_ALIGN, NULL);
549         return request_cache ? 0 : -ENOMEM;
550 }
551
552 void ptlrpc_request_cache_fini(void)
553 {
554         kmem_cache_destroy(request_cache);
555 }
556
557 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
558 {
559         struct ptlrpc_request *req;
560
561         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
562         return req;
563 }
564
565 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
566 {
567         OBD_SLAB_FREE_PTR(req, request_cache);
568 }
569
570 /**
571  * Wind down request pool \a pool.
572  * Frees all requests from the pool too
573  */
574 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
575 {
576         struct ptlrpc_request *req;
577
578         LASSERT(pool != NULL);
579
580         spin_lock(&pool->prp_lock);
581         while ((req = list_first_entry_or_null(&pool->prp_req_list,
582                                                struct ptlrpc_request,
583                                                rq_list))) {
584                 list_del(&req->rq_list);
585                 LASSERT(req->rq_reqbuf);
586                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
587                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
588                 ptlrpc_request_cache_free(req);
589         }
590         spin_unlock(&pool->prp_lock);
591         OBD_FREE(pool, sizeof(*pool));
592 }
593 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
594
595 /**
596  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
597  */
598 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
599 {
600         int i;
601         int size = 1;
602
603         while (size < pool->prp_rq_size)
604                 size <<= 1;
605
606         LASSERTF(list_empty(&pool->prp_req_list) ||
607                  size == pool->prp_rq_size,
608                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
609                  pool->prp_rq_size, size);
610
611         pool->prp_rq_size = size;
612         for (i = 0; i < num_rq; i++) {
613                 struct ptlrpc_request *req;
614                 struct lustre_msg *msg;
615
616                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
617                 if (!req)
618                         return i;
619                 OBD_ALLOC_LARGE(msg, size);
620                 if (!msg) {
621                         ptlrpc_request_cache_free(req);
622                         return i;
623                 }
624                 req->rq_reqbuf = msg;
625                 req->rq_reqbuf_len = size;
626                 req->rq_pool = pool;
627                 spin_lock(&pool->prp_lock);
628                 list_add_tail(&req->rq_list, &pool->prp_req_list);
629                 spin_unlock(&pool->prp_lock);
630         }
631         return num_rq;
632 }
633 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
634
635 /**
636  * Create and initialize new request pool with given attributes:
637  * \a num_rq - initial number of requests to create for the pool
638  * \a msgsize - maximum message size possible for requests in this pool
639  * \a populate_pool - function to be called when more requests need to be added
640  *                    to the pool
641  * Returns pointer to newly created pool or NULL on error.
642  */
643 struct ptlrpc_request_pool *
644 ptlrpc_init_rq_pool(int num_rq, int msgsize,
645                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
646 {
647         struct ptlrpc_request_pool *pool;
648
649         OBD_ALLOC_PTR(pool);
650         if (!pool)
651                 return NULL;
652
653         /*
654          * Request the next power of two for the allocation, because the
655          * kernel would do exactly this internally
656          */
657         spin_lock_init(&pool->prp_lock);
658         INIT_LIST_HEAD(&pool->prp_req_list);
659         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
660         pool->prp_populate = populate_pool;
661
662         populate_pool(pool, num_rq);
663
664         return pool;
665 }
666 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
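/*
 * Illustrative sketch, not part of the original file: a client that wants
 * an emergency request pool (e.g. for the writeout path) can create one
 * with ptlrpc_add_rqs_to_pool() as the populate callback.  The pool size
 * and message size below are arbitrary values chosen for illustration.
 */
#if 0	/* example only */
static struct ptlrpc_request_pool *example_create_pool(void)
{
	/* preallocate 4 requests, each able to hold a 16KB request message */
	return ptlrpc_init_rq_pool(4, 16384, ptlrpc_add_rqs_to_pool);
}
#endif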
667
668 /**
669  * Fetches one request from pool \a pool
670  */
671 static struct ptlrpc_request *
672 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
673 {
674         struct ptlrpc_request *request;
675         struct lustre_msg *reqbuf;
676
677         if (!pool)
678                 return NULL;
679
680         spin_lock(&pool->prp_lock);
681
682         /*
683          * See if we have anything in the pool, and bail out if not.
684          * In the writeout path, where this matters, this is safe to do
685          * because nothing is lost in this case, and when some in-flight
686          * requests complete, this code will be called again.
687          */
688         if (unlikely(list_empty(&pool->prp_req_list))) {
689                 spin_unlock(&pool->prp_lock);
690                 return NULL;
691         }
692
693         request = list_first_entry(&pool->prp_req_list, struct ptlrpc_request,
694                                    rq_list);
695         list_del_init(&request->rq_list);
696         spin_unlock(&pool->prp_lock);
697
698         LASSERT(request->rq_reqbuf);
699         LASSERT(request->rq_pool);
700
701         reqbuf = request->rq_reqbuf;
702         memset(request, 0, sizeof(*request));
703         request->rq_reqbuf = reqbuf;
704         request->rq_reqbuf_len = pool->prp_rq_size;
705         request->rq_pool = pool;
706
707         return request;
708 }
709
710 /**
711  * Return the freed \a request back to its pool.
712  */
713 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
714 {
715         struct ptlrpc_request_pool *pool = request->rq_pool;
716
717         spin_lock(&pool->prp_lock);
718         LASSERT(list_empty(&request->rq_list));
719         LASSERT(!request->rq_receiving_reply);
720         list_add_tail(&request->rq_list, &pool->prp_req_list);
721         spin_unlock(&pool->prp_lock);
722 }
723
724 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
725 {
726         struct obd_import *imp = req->rq_import;
727         struct ptlrpc_request *iter;
728
729         assert_spin_locked(&imp->imp_lock);
730         LASSERT(list_empty(&req->rq_unreplied_list));
731
732         /* unreplied list is sorted by xid in ascending order */
733         list_for_each_entry_reverse(iter, &imp->imp_unreplied_list,
734                                     rq_unreplied_list) {
735                 LASSERT(req->rq_xid != iter->rq_xid);
736                 if (req->rq_xid < iter->rq_xid)
737                         continue;
738                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
739                 return;
740         }
741         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
742 }
743
744 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
745 {
746         req->rq_xid = ptlrpc_next_xid();
747         ptlrpc_add_unreplied(req);
748 }
749
750 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
751 {
752         spin_lock(&req->rq_import->imp_lock);
753         ptlrpc_assign_next_xid_nolock(req);
754         spin_unlock(&req->rq_import->imp_lock);
755 }
756
757 static atomic64_t ptlrpc_last_xid;
758
759 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
760 {
761         spin_lock(&req->rq_import->imp_lock);
762         list_del_init(&req->rq_unreplied_list);
763         ptlrpc_assign_next_xid_nolock(req);
764         spin_unlock(&req->rq_import->imp_lock);
765         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
766 }
767
768 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
769 {
770         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
771         __u32 opc;
772         __u16 tag;
773
774         opc = lustre_msg_get_opc(req->rq_reqmsg);
775         tag = obd_get_mod_rpc_slot(cli, opc);
776         lustre_msg_set_tag(req->rq_reqmsg, tag);
777         ptlrpc_reassign_next_xid(req);
778 }
779 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
780
781 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
782 {
783         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
784
785         if (tag != 0) {
786                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
787                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
788
789                 obd_put_mod_rpc_slot(cli, opc, tag);
790         }
791 }
792 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
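/*
 * Illustrative sketch, not part of the original file: a caller sending a
 * modifying RPC takes a slot (which tags the request and reassigns its XID)
 * before sending, and releases the slot once the reply has been processed.
 */
#if 0	/* example only */
static void example_mod_rpc(struct ptlrpc_request *req)
{
	ptlrpc_get_mod_rpc_slot(req);
	/* ... send the request and wait for its reply ... */
	ptlrpc_put_mod_rpc_slot(req);
}
#endif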
793
794 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
795                              __u32 version, int opcode, char **bufs,
796                              struct ptlrpc_cli_ctx *ctx)
797 {
798         int count;
799         struct obd_import *imp;
800         __u32 *lengths;
801         int rc;
802
803         ENTRY;
804
805         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
806         imp = request->rq_import;
807         lengths = request->rq_pill.rc_area[RCL_CLIENT];
808
809         if (ctx) {
810                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
811         } else {
812                 rc = sptlrpc_req_get_ctx(request);
813                 if (rc)
814                         GOTO(out_free, rc);
815         }
816         sptlrpc_req_set_flavor(request, opcode);
817
818         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
819                                  lengths, bufs);
820         if (rc)
821                 GOTO(out_ctx, rc);
822
823         lustre_msg_add_version(request->rq_reqmsg, version);
824         request->rq_send_state = LUSTRE_IMP_FULL;
825         request->rq_type = PTL_RPC_MSG_REQUEST;
826
827         request->rq_req_cbid.cbid_fn  = request_out_callback;
828         request->rq_req_cbid.cbid_arg = request;
829
830         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
831         request->rq_reply_cbid.cbid_arg = request;
832
833         request->rq_reply_deadline = 0;
834         request->rq_bulk_deadline = 0;
835         request->rq_req_deadline = 0;
836         request->rq_phase = RQ_PHASE_NEW;
837         request->rq_next_phase = RQ_PHASE_UNDEFINED;
838
839         request->rq_request_portal = imp->imp_client->cli_request_portal;
840         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
841
842         ptlrpc_at_set_req_timeout(request);
843
844         lustre_msg_set_opc(request->rq_reqmsg, opcode);
845
846         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
847         if (cfs_fail_val == opcode) {
848                 time64_t *fail_t = NULL, *fail2_t = NULL;
849
850                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
851                         fail_t = &request->rq_bulk_deadline;
852                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
853                         fail_t = &request->rq_reply_deadline;
854                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
855                         fail_t = &request->rq_req_deadline;
856                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
857                         fail_t = &request->rq_reply_deadline;
858                         fail2_t = &request->rq_bulk_deadline;
859                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
860                         time64_t now = ktime_get_real_seconds();
861                         u64 xid = ((u64)now >> 4) << 24;
862
863                         atomic64_set(&ptlrpc_last_xid, xid);
864                 }
865
866                 if (fail_t) {
867                         *fail_t = ktime_get_real_seconds() +
868                                   PTLRPC_REQ_LONG_UNLINK;
869
870                         if (fail2_t)
871                                 *fail2_t = ktime_get_real_seconds() +
872                                            PTLRPC_REQ_LONG_UNLINK;
873
874                         /*
875                          * The RPC is infected, let the test change the
876                          * fail_loc
877                          */
878                         msleep(4 * MSEC_PER_SEC);
879                 }
880         }
881         ptlrpc_assign_next_xid(request);
882
883         RETURN(0);
884
885 out_ctx:
886         LASSERT(!request->rq_pool);
887         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
888 out_free:
889         atomic_dec(&imp->imp_reqs);
890         class_import_put(imp);
891
892         return rc;
893 }
894 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
895
896 /**
897  * Pack request buffers for network transfer, performing any necessary
898  * encryption steps.
899  */
900 int ptlrpc_request_pack(struct ptlrpc_request *request,
901                         __u32 version, int opcode)
902 {
903         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
904 }
905 EXPORT_SYMBOL(ptlrpc_request_pack);
906
907 /**
908  * Helper function to allocate new request on import \a imp
909  * and possibly use an existing request from pool \a pool if provided.
910  * Returns allocated request structure with import field filled or
911  * NULL on error.
912  */
913 static inline
914 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
915                                               struct ptlrpc_request_pool *pool)
916 {
917         struct ptlrpc_request *request = NULL;
918
919         request = ptlrpc_request_cache_alloc(GFP_NOFS);
920
921         if (!request && pool)
922                 request = ptlrpc_prep_req_from_pool(pool);
923
924         if (request) {
925                 ptlrpc_cli_req_init(request);
926
927                 LASSERTF((unsigned long)imp > 0x1000, "%px\n", imp);
928                 LASSERT(imp != LP_POISON);
929                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%px\n",
930                          imp->imp_client);
931                 LASSERT(imp->imp_client != LP_POISON);
932
933                 request->rq_import = class_import_get(imp);
934                 atomic_inc(&imp->imp_reqs);
935         } else {
936                 CERROR("request allocation out of memory\n");
937         }
938
939         return request;
940 }
941
942 static int ptlrpc_reconnect_if_idle(struct obd_import *imp)
943 {
944         int rc;
945
946         /*
947          * initiate connection if needed when the import has been
948          * referenced by the new request to avoid races with disconnect.
949          * serialize this check against conditional state=IDLE
950          * in ptlrpc_disconnect_idle_interpret()
951          */
952         spin_lock(&imp->imp_lock);
953         if (imp->imp_state == LUSTRE_IMP_IDLE) {
954                 imp->imp_generation++;
955                 imp->imp_initiated_at = imp->imp_generation;
956                 imp->imp_state = LUSTRE_IMP_NEW;
957
958                 /* connect_import_locked releases imp_lock */
959                 rc = ptlrpc_connect_import_locked(imp);
960                 if (rc)
961                         return rc;
962                 ptlrpc_pinger_add_import(imp);
963         } else {
964                 spin_unlock(&imp->imp_lock);
965         }
966         return 0;
967 }
968
969 /**
970  * Helper function for creating a request.
971  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
972  * buffer structures according to capsule template \a format.
973  * Returns allocated request structure pointer or NULL on error.
974  */
975 static struct ptlrpc_request *
976 ptlrpc_request_alloc_internal(struct obd_import *imp,
977                               struct ptlrpc_request_pool *pool,
978                               const struct req_format *format)
979 {
980         struct ptlrpc_request *request;
981
982         request = __ptlrpc_request_alloc(imp, pool);
983         if (!request)
984                 return NULL;
985
986         /* don't make an expensive check for an idling connection
987          * if it's already connected */
988         if (unlikely(imp->imp_state != LUSTRE_IMP_FULL)) {
989                 if (ptlrpc_reconnect_if_idle(imp) < 0) {
990                         atomic_dec(&imp->imp_reqs);
991                         ptlrpc_request_free(request);
992                         return NULL;
993                 }
994         }
995
996         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
997         req_capsule_set(&request->rq_pill, format);
998         return request;
999 }
1000
1001 /**
1002  * Allocate new request structure for import \a imp and initialize its
1003  * buffer structure according to capsule template \a format.
1004  */
1005 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1006                                             const struct req_format *format)
1007 {
1008         return ptlrpc_request_alloc_internal(imp, NULL, format);
1009 }
1010 EXPORT_SYMBOL(ptlrpc_request_alloc);
1011
1012 /**
1013  * Allocate new request structure for import \a imp from pool \a pool and
1014  * initialize its buffer structure according to capsule template \a format.
1015  */
1016 struct ptlrpc_request *
1017 ptlrpc_request_alloc_pool(struct obd_import *imp,
1018                           struct ptlrpc_request_pool *pool,
1019                           const struct req_format *format)
1020 {
1021         return ptlrpc_request_alloc_internal(imp, pool, format);
1022 }
1023 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1024
1025 /**
1026  * For requests not from pool, free memory of the request structure.
1027  * For requests obtained from a pool earlier, return request back to pool.
1028  */
1029 void ptlrpc_request_free(struct ptlrpc_request *request)
1030 {
1031         if (request->rq_pool)
1032                 __ptlrpc_free_req_to_pool(request);
1033         else
1034                 ptlrpc_request_cache_free(request);
1035 }
1036 EXPORT_SYMBOL(ptlrpc_request_free);
1037
1038 /**
1039  * Allocate new request for operation \a opcode and immediately pack it for
1040  * network transfer.
1041  * Only used for simple requests like OBD_PING where the only important
1042  * part of the request is the operation itself.
1043  * Returns allocated request or NULL on error.
1044  */
1045 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1046                                                  const struct req_format *format,
1047                                                  __u32 version, int opcode)
1048 {
1049         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1050         int rc;
1051
1052         if (req) {
1053                 rc = ptlrpc_request_pack(req, version, opcode);
1054                 if (rc) {
1055                         ptlrpc_request_free(req);
1056                         req = NULL;
1057                 }
1058         }
1059         return req;
1060 }
1061 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
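/*
 * Illustrative sketch, not part of the original file: a simple RPC such as
 * OBD_PING can be allocated and packed in one call.  RQF_OBD_PING,
 * LUSTRE_OBD_VERSION, OBD_PING and ptlrpc_request_set_replen() are assumed
 * to be the usual request format, version, opcode and reply-length helpers
 * used elsewhere in the tree.
 */
#if 0	/* example only */
static struct ptlrpc_request *example_prep_ping(struct obd_import *imp)
{
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	if (req)
		ptlrpc_request_set_replen(req);
	return req;
}
#endif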
1062
1063 /**
1064  * Allocate and initialize new request set structure on the current CPT.
1065  * Returns a pointer to the newly allocated set structure or NULL on error.
1066  */
1067 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1068 {
1069         struct ptlrpc_request_set *set;
1070         int cpt;
1071
1072         ENTRY;
1073         cpt = cfs_cpt_current(cfs_cpt_tab, 0);
1074         OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
1075         if (!set)
1076                 RETURN(NULL);
1077         atomic_set(&set->set_refcount, 1);
1078         INIT_LIST_HEAD(&set->set_requests);
1079         init_waitqueue_head(&set->set_waitq);
1080         atomic_set(&set->set_new_count, 0);
1081         atomic_set(&set->set_remaining, 0);
1082         spin_lock_init(&set->set_new_req_lock);
1083         INIT_LIST_HEAD(&set->set_new_requests);
1084         set->set_max_inflight = UINT_MAX;
1085         set->set_producer     = NULL;
1086         set->set_producer_arg = NULL;
1087         set->set_rc           = 0;
1088
1089         RETURN(set);
1090 }
1091 EXPORT_SYMBOL(ptlrpc_prep_set);
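/*
 * Illustrative sketch, not part of the original file: typical request-set
 * usage is to create a set, add one or more packed requests to it, wait for
 * completion, then destroy the set.  ptlrpc_set_add_req() and
 * ptlrpc_set_destroy() are defined later in this file; ptlrpc_set_wait() is
 * assumed to be the usual synchronous waiter declared in the ptlrpc headers.
 */
#if 0	/* example only */
static int example_send_via_set(const struct lu_env *env,
				struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();
	if (!set)
		return -ENOMEM;

	ptlrpc_set_add_req(set, req);	/* the set takes the reference */
	rc = ptlrpc_set_wait(env, set);
	ptlrpc_set_destroy(set);
	return rc;
}
#endif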
1092
1093 /**
1094  * Allocate and initialize new request set structure with flow control
1095  * extension. This extension allows controlling the number of in-flight requests
1096  * for the whole set. A callback function to generate requests must be provided
1097  * and the request set will keep the number of requests sent over the wire to
1098  * @max_inflight.
1099  * Returns a pointer to the newly allocated set structure or NULL on error.
1100  */
1101 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1102                                              void *arg)
1103
1104 {
1105         struct ptlrpc_request_set *set;
1106
1107         set = ptlrpc_prep_set();
1108         if (!set)
1109                 RETURN(NULL);
1110
1111         set->set_max_inflight  = max;
1112         set->set_producer      = func;
1113         set->set_producer_arg  = arg;
1114
1115         RETURN(set);
1116 }
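/*
 * Illustrative sketch, not part of the original file: a flow-controlled set
 * is created with a producer callback (of type set_producer_func, as in the
 * prototype above) that builds and queues the next request each time the
 * set has room below @max.  The producer and its argument are hypothetical.
 */
#if 0	/* example only */
static struct ptlrpc_request_set *example_make_fc_set(set_producer_func producer,
						      void *arg)
{
	/* keep no more than 8 RPCs from this set in flight at once */
	return ptlrpc_prep_fcset(8, producer, arg);
}
#endif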
1117
1118 /**
1119  * Wind down and free request set structure previously allocated with
1120  * ptlrpc_prep_set.
1121  * Ensures that all requests on the set have completed and removes
1122  * all requests from the request list in the set.
1123  * If any unsent requests happen to be on the list, pretends that they got
1124  * an error in flight and calls their completion handler.
1125  */
1126 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1127 {
1128         struct ptlrpc_request *req;
1129         int expected_phase;
1130         int n = 0;
1131
1132         ENTRY;
1133
1134         /* Requests on the set should either all be completed, or all be new */
1135         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1136                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1137         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
1138                 LASSERT(req->rq_phase == expected_phase);
1139                 n++;
1140         }
1141
1142         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1143                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1144                  atomic_read(&set->set_remaining), n);
1145
1146         while ((req = list_first_entry_or_null(&set->set_requests,
1147                                                struct ptlrpc_request,
1148                                                rq_set_chain))) {
1149                 list_del_init(&req->rq_set_chain);
1150
1151                 LASSERT(req->rq_phase == expected_phase);
1152
1153                 if (req->rq_phase == RQ_PHASE_NEW) {
1154                         ptlrpc_req_interpret(NULL, req, -EBADR);
1155                         atomic_dec(&set->set_remaining);
1156                 }
1157
1158                 spin_lock(&req->rq_lock);
1159                 req->rq_set = NULL;
1160                 req->rq_invalid_rqset = 0;
1161                 spin_unlock(&req->rq_lock);
1162
1163                 ptlrpc_req_finished(req);
1164         }
1165
1166         LASSERT(atomic_read(&set->set_remaining) == 0);
1167
1168         ptlrpc_reqset_put(set);
1169         EXIT;
1170 }
1171 EXPORT_SYMBOL(ptlrpc_set_destroy);
1172
1173 /**
1174  * Add a new request to the general purpose request set.
1175  * Assumes request reference from the caller.
1176  */
1177 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1178                         struct ptlrpc_request *req)
1179 {
1180         if (set == PTLRPCD_SET) {
1181                 ptlrpcd_add_req(req);
1182                 return;
1183         }
1184
1185         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1186         LASSERT(list_empty(&req->rq_set_chain));
1187
1188         if (req->rq_allow_intr)
1189                 set->set_allow_intr = 1;
1190
1191         /* The set takes over the caller's request reference */
1192         list_add_tail(&req->rq_set_chain, &set->set_requests);
1193         req->rq_set = set;
1194         atomic_inc(&set->set_remaining);
1195         req->rq_queued_time = ktime_get_seconds();
1196
1197         if (req->rq_reqmsg) {
1198                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1199                 lustre_msg_set_uid_gid(req->rq_reqmsg, NULL, NULL);
1200         }
1201
1202         if (set->set_producer)
1203                 /*
1204                  * If the request set has a producer callback, the RPC must be
1205                  * sent straight away
1206                  */
1207                 ptlrpc_send_new_req(req);
1208 }
1209 EXPORT_SYMBOL(ptlrpc_set_add_req);
1210
1211 /**
1212  * Add a request to a request set with a dedicated server thread
1213  * and wake the thread to do any necessary processing.
1214  * Currently only used for ptlrpcd.
1215  */
1216 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1217                             struct ptlrpc_request *req)
1218 {
1219         struct ptlrpc_request_set *set = pc->pc_set;
1220         int count, i;
1221
1222         LASSERT(req->rq_set == NULL);
1223         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1224
1225         spin_lock(&set->set_new_req_lock);
1226         /*
1227          * The set takes over the caller's request reference.
1228          */
1229         req->rq_set = set;
1230         req->rq_queued_time = ktime_get_seconds();
1231         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1232         count = atomic_inc_return(&set->set_new_count);
1233         spin_unlock(&set->set_new_req_lock);
1234
1235         /* Only need to call wakeup once for the first entry. */
1236         if (count == 1) {
1237                 wake_up(&set->set_waitq);
1238
1239                 /*
1240                  * XXX: It may be unnecessary to wake up all the partners. But to
1241                  *      guarantee the async RPC can be processed ASAP, we have
1242                  *      no better choice. It may be fixed in the future.
1243                  */
1244                 for (i = 0; i < pc->pc_npartners; i++)
1245                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1246         }
1247 }
1248
1249 /**
1250  * Based on the current state of the import, determine if the request
1251  * can be sent, is an error, or should be delayed.
1252  *
1253  * Returns true if this request should be delayed. If false, and
1254  * *status is set, then the request cannot be sent and *status is the
1255  * error code.  If false and status is 0, then the request can be sent.
1256  *
1257  * The imp->imp_lock must be held.
1258  */
1259 static int ptlrpc_import_delay_req(struct obd_import *imp,
1260                                    struct ptlrpc_request *req, int *status)
1261 {
1262         int delay = 0;
1263
1264         ENTRY;
1265         LASSERT(status);
1266         *status = 0;
1267
1268         if (req->rq_ctx_init || req->rq_ctx_fini) {
1269                 /* always allow ctx init/fini rpc go through */
1270         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1271                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1272                 *status = -EIO;
1273         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1274                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1275
1276                 /*
1277                  * pings or MDS-equivalent STATFS may safely
1278                  * race with umount
1279                  */
1280                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1281                           D_HA : D_ERROR, req, "IMP_CLOSED");
1282                 *status = -EIO;
1283         } else if (ptlrpc_send_limit_expired(req)) {
1284                 /* probably doesn't need to be a D_ERROR after initial testing */
1285                 DEBUG_REQ(D_HA, req, "send limit expired");
1286                 *status = -ETIMEDOUT;
1287         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1288                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1289                 ;/* allow CONNECT even if import is invalid */
1290                 if (atomic_read(&imp->imp_inval_count) != 0) {
1291                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1292                         *status = -EIO;
1293                 }
1294         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1295                 if (!imp->imp_deactive)
1296                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1297                 *status = -ESHUTDOWN; /* b=12940 */
1298         } else if (req->rq_import_generation != imp->imp_generation) {
1299                 DEBUG_REQ(req->rq_no_resend ? D_INFO : D_ERROR,
1300                           req, "req wrong generation:");
1301                 *status = -EIO;
1302         } else if (req->rq_send_state != imp->imp_state) {
1303                 /* invalidate in progress - any requests should be dropped */
1304                 if (atomic_read(&imp->imp_inval_count) != 0) {
1305                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1306                         *status = -EIO;
1307                 } else if (req->rq_no_delay &&
1308                            imp->imp_generation != imp->imp_initiated_at) {
1309                         /* ignore nodelay for requests initiating connections */
1310                         *status = -EAGAIN;
1311                 } else if (req->rq_allow_replay &&
1312                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1313                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1314                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1315                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1316                         DEBUG_REQ(D_HA, req, "allow during recovery");
1317                 } else {
1318                         delay = 1;
1319                 }
1320         }
1321
1322         RETURN(delay);
1323 }
1324
1325 /**
1326  * Decide if the error message should be printed to the console or not.
1327  * Makes its decision based on request type, status, and failure frequency.
1328  *
1329  * \param[in] req  request that failed and may need a console message
1330  *
1331  * \retval false if no message should be printed
1332  * \retval true  if console message should be printed
1333  */
1334 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1335 {
1336         LASSERT(req->rq_reqmsg != NULL);
1337
1338         /* Suppress particular reconnect errors which are to be expected. */
1339         if (opc == OST_CONNECT || opc == OST_DISCONNECT ||
1340             opc == MDS_CONNECT || opc == MDS_DISCONNECT ||
1341             opc == MGS_CONNECT || opc == MGS_DISCONNECT) {
1342                 /* Suppress timed out reconnect/disconnect requests */
1343                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1344                     req->rq_timedout)
1345                         return false;
1346
1347                 /*
1348                  * Suppress most unavailable/again reconnect requests, but
1349                  * print occasionally so it is clear client is trying to
1350                  * connect to a server where no target is running.
1351                  */
1352                 if ((err == -ENODEV || err == -EAGAIN) &&
1353                     req->rq_import->imp_conn_cnt % 30 != 20)
1354                         return false;
1355         }
1356
1357         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1358                 /* -EAGAIN is normal when using POSIX flocks */
1359                 return false;
1360
1361         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1362             (req->rq_xid & 0xf) != 10)
1363                 /* Suppress most ping requests, they may fail occasionally */
1364                 return false;
1365
1366         return true;
1367 }
1368
1369 /**
1370  * Check request processing status.
1371  * Returns the status.
1372  */
1373 static int ptlrpc_check_status(struct ptlrpc_request *req)
1374 {
1375         int rc;
1376
1377         ENTRY;
1378         rc = lustre_msg_get_status(req->rq_repmsg);
1379         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1380                 struct obd_import *imp = req->rq_import;
1381                 struct lnet_nid *nid = &imp->imp_connection->c_peer.nid;
1382                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1383
1384                 if (ptlrpc_console_allow(req, opc, rc))
1385                         LCONSOLE_ERROR_MSG(0x11,
1386                                            "%s: operation %s to node %s failed: rc = %d\n",
1387                                            imp->imp_obd->obd_name,
1388                                            ll_opcode2str(opc),
1389                                            libcfs_nidstr(nid), rc);
1390                 RETURN(rc < 0 ? rc : -EINVAL);
1391         }
1392
1393         if (rc)
1394                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1395
1396         RETURN(rc);
1397 }
1398
1399 /**
1400  * Save pre-versions of objects into request for replay.
1401  * Versions are obtained from the server reply.
1402  * Used for VBR (Version Based Recovery).
1403  */
1404 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1405 {
1406         struct lustre_msg *repmsg = req->rq_repmsg;
1407         struct lustre_msg *reqmsg = req->rq_reqmsg;
1408         __u64 *versions = lustre_msg_get_versions(repmsg);
1409
1410         ENTRY;
1411         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1412                 return;
1413
1414         LASSERT(versions);
1415         lustre_msg_set_versions(reqmsg, versions);
1416         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1417                versions[0], versions[1]);
1418
1419         EXIT;
1420 }
1421
1422 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1423 {
1424         struct ptlrpc_request *req;
1425
1426         assert_spin_locked(&imp->imp_lock);
1427         if (list_empty(&imp->imp_unreplied_list))
1428                 return 0;
1429
1430         req = list_first_entry(&imp->imp_unreplied_list, struct ptlrpc_request,
1431                                rq_unreplied_list);
1432         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1433
1434         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1435                 imp->imp_known_replied_xid = req->rq_xid - 1;
1436
1437         return req->rq_xid - 1;
1438 }
1439
1440 /**
1441  * Callback function called when client receives RPC reply for \a req.
1442  * Returns 0 on success or error code.
1443  * The return value would be assigned to req->rq_status by the caller
1444  * as request processing status.
1445  * This function also decides if the request needs to be saved for later replay.
1446  */
1447 static int after_reply(struct ptlrpc_request *req)
1448 {
1449         struct obd_import *imp = req->rq_import;
1450         struct obd_device *obd = req->rq_import->imp_obd;
1451         ktime_t work_start;
1452         u64 committed;
1453         s64 timediff;
1454         int rc;
1455
1456         ENTRY;
1457         LASSERT(obd != NULL);
1458         /* repbuf must be unlinked */
1459         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1460
1461         if (req->rq_reply_truncated) {
1462                 if (ptlrpc_no_resend(req)) {
1463                         DEBUG_REQ(D_ERROR, req,
1464                                   "reply buffer overflow, expected=%d, actual size=%d",
1465                                   req->rq_nob_received, req->rq_repbuf_len);
1466                         RETURN(-EOVERFLOW);
1467                 }
1468
1469                 sptlrpc_cli_free_repbuf(req);
1470                 /*
1471                  * Pass the required reply buffer size (including
1472                  * space for the early reply).
1473                  * NB: no need to round up because alloc_repbuf
1474                  * will round it up
1475                  */
1476                 req->rq_replen = req->rq_nob_received;
1477                 req->rq_nob_received = 0;
1478                 spin_lock(&req->rq_lock);
1479                 req->rq_resend       = 1;
1480                 spin_unlock(&req->rq_lock);
1481                 RETURN(0);
1482         }
1483
1484         work_start = ktime_get_real();
1485         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1486
1487         /*
1488          * NB Until this point, the whole of the incoming message,
1489          * including buflens, status etc is in the sender's byte order.
1490          */
1491         rc = sptlrpc_cli_unwrap_reply(req);
1492         if (rc) {
1493                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1494                 RETURN(rc);
1495         }
1496
1497         /*
1498          * The security layer unwrap may ask to resend this request.
1499          */
1500         if (req->rq_resend)
1501                 RETURN(0);
1502
1503         rc = unpack_reply(req);
1504         if (rc)
1505                 RETURN(rc);
1506
1507         /* retry indefinitely on EINPROGRESS */
1508         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1509             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1510                 time64_t now = ktime_get_real_seconds();
1511
1512                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1513                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1514                 spin_lock(&req->rq_lock);
1515                 req->rq_resend = 1;
1516                 spin_unlock(&req->rq_lock);
1517                 req->rq_nr_resend++;
1518
1519                 /* Readjust the timeout for current conditions */
1520                 ptlrpc_at_set_req_timeout(req);
1521                 /*
1522          * Delay the resend to give the server a chance to get ready.
1523                  * The delay is increased by 1s on every resend and is capped to
1524                  * the current request timeout (i.e. obd_timeout if AT is off,
1525                  * or AT service time x 125% + 5s, see at_est2timeout)
1526                  */
1527                 if (req->rq_nr_resend > req->rq_timeout)
1528                         req->rq_sent = now + req->rq_timeout;
1529                 else
1530                         req->rq_sent = now + req->rq_nr_resend;
1531
1532                 /* Resend for EINPROGRESS will use a new XID */
1533                 spin_lock(&imp->imp_lock);
1534                 list_del_init(&req->rq_unreplied_list);
1535                 spin_unlock(&imp->imp_lock);
1536
1537                 RETURN(0);
1538         }
1539
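             /* record the measured request round-trip time in the client's RPC stats */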
1540         if (obd->obd_svc_stats) {
1541                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1542                                     timediff);
1543                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1544         }
1545
1546         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1547             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1548                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1549                           lustre_msg_get_type(req->rq_repmsg));
1550                 RETURN(-EPROTO);
1551         }
1552
1553         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1554                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
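             /* feed the service time and latency information carried in the
              * reply into the adaptive timeout estimates */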
1555         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1556         ptlrpc_at_adj_net_latency(req,
1557                                   lustre_msg_get_service_timeout(req->rq_repmsg));
1558
1559         rc = ptlrpc_check_status(req);
1560
1561         if (rc) {
1562                 /*
1563                  * Either we've been evicted, or the server has failed for
1564                  * some reason. Try to reconnect, and if that fails, punt to
1565                  * the upcall.
1566                  */
1567                 if (ptlrpc_recoverable_error(rc)) {
1568                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1569                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1570                                 RETURN(rc);
1571                         }
1572                         ptlrpc_request_handle_notconn(req);
1573                         RETURN(rc);
1574                 }
1575         } else {
1576                 /*
1577                  * Check whether the server sent an SLV (server lock volume).
1578                  * Do this only for RPCs with rc == 0.
1579                  */
1580                 ldlm_cli_update_pool(req);
1581         }
1582
1583         /*
1584          * Store transno in reqmsg for replay.
1585          */
1586         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1587                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1588                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1589         }
1590
1591         if (lustre_msg_get_transno(req->rq_repmsg) ||
1592             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_ENQUEUE)
1593                 imp->imp_no_cached_data = 0;
1594
1595         if (imp->imp_replayable) {
1596                 /* if other threads are waiting for ptlrpc_free_committed()
1597                  * they could continue the work of freeing RPCs. That reduces
1598                  * lock hold times, and distributes work more fairly across
1599                  * waiting threads.  We can't use spin_is_contended() since
1600                  * there are many other places where imp_lock is held.
1601                  */
1602                 atomic_inc(&imp->imp_waiting);
1603                 spin_lock(&imp->imp_lock);
1604                 atomic_dec(&imp->imp_waiting);
1605                 /*
1606                  * No point in adding already-committed requests to the replay
1607                  * list, we will just remove them immediately. b=9829
1608                  */
1609                 if (req->rq_transno != 0 &&
1610                     (req->rq_transno >
1611                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1612                      req->rq_replay)) {
1613                         /** version recovery */
1614                         ptlrpc_save_versions(req);
1615                         ptlrpc_retain_replayable_request(req, imp);
1616                 } else if (req->rq_commit_cb &&
1617                            list_empty(&req->rq_replay_list)) {
1618                         /*
1619                          * NB: don't call rq_commit_cb if it's already on
1620                          * rq_replay_list, ptlrpc_free_committed() will call
1621                          * it later, see LU-3618 for details
1622                          */
1623                         spin_unlock(&imp->imp_lock);
1624                         req->rq_commit_cb(req);
1625                         atomic_inc(&imp->imp_waiting);
1626                         spin_lock(&imp->imp_lock);
1627                         atomic_dec(&imp->imp_waiting);
1628                 }
1629
1630                 /*
1631                  * Replay-enabled imports return commit-status information.
1632                  */
1633                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1634                 if (likely(committed > imp->imp_peer_committed_transno))
1635                         imp->imp_peer_committed_transno = committed;
1636
1637                 ptlrpc_free_committed(imp);
1638
1639                 if (!list_empty(&imp->imp_replay_list)) {
1640                         struct ptlrpc_request *last;
1641
1642                         last = list_entry(imp->imp_replay_list.prev,
1643                                           struct ptlrpc_request,
1644                                           rq_replay_list);
1645                         /*
1646                          * Requests with rq_replay stay on the list even if no
1647                          * commit is expected.
1648                          */
1649                         if (last->rq_transno > imp->imp_peer_committed_transno)
1650                                 ptlrpc_pinger_commit_expected(imp);
1651                 }
1652
1653                 spin_unlock(&imp->imp_lock);
1654         }
1655
1656         RETURN(rc);
1657 }
1658
1659 /**
1660  * Helper function to send request \a req over the network for the first time.
1661  * Also adjusts the request phase.
1662  * Returns 0 on success or error code.
1663  */
1664 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1665 {
1666         struct obd_import *imp = req->rq_import;
1667         __u64 min_xid = 0;
1668         int rc;
1669
1670         ENTRY;
1671         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1672
1673         /* do not try to go further if there is not enough memory in pool */
1674         if (req->rq_sent && req->rq_bulk)
1675                 if (req->rq_bulk->bd_iov_count >
1676                     obd_pool_get_free_pages(0) &&
1677                     pool_is_at_full_capacity(0))
1678                         RETURN(-ENOMEM);
1679
1680         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1681             (!req->rq_generation_set ||
1682              req->rq_import_generation == imp->imp_generation))
1683                 RETURN(0);
1684
1685         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1686
1687         spin_lock(&imp->imp_lock);
1688
1689         LASSERT(req->rq_xid != 0);
1690         LASSERT(!list_empty(&req->rq_unreplied_list));
1691
1692         if (!req->rq_generation_set)
1693                 req->rq_import_generation = imp->imp_generation;
1694
1695         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1696                 spin_lock(&req->rq_lock);
1697                 req->rq_waiting = 1;
1698                 spin_unlock(&req->rq_lock);
1699
1700                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1701                           ptlrpc_import_state_name(req->rq_send_state),
1702                           ptlrpc_import_state_name(imp->imp_state));
1703                 LASSERT(list_empty(&req->rq_list));
1704                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1705                 atomic_inc(&req->rq_import->imp_inflight);
1706                 spin_unlock(&imp->imp_lock);
1707                 RETURN(0);
1708         }
1709
1710         if (rc != 0) {
1711                 spin_unlock(&imp->imp_lock);
1712                 req->rq_status = rc;
1713                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1714                 RETURN(rc);
1715         }
1716
1717         LASSERT(list_empty(&req->rq_list));
1718         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1719         atomic_inc(&req->rq_import->imp_inflight);
1720
1721         /*
1722          * find the known replied XID from the unreplied list; CONNECT
1723          * and DISCONNECT requests are skipped to keep the server-side
1724          * sanity check happy, see process_req_last_xid().
1725          *
1726          * For CONNECT: because replay requests have lower XIDs, the
1727          * sanity check would break if CONNECT bumped exp_last_xid on
1728          * the server.
1729          *
1730          * For DISCONNECT: since the client aborts inflight RPCs before
1731          * sending DISCONNECT, DISCONNECT may carry an XID higher than
1732          * those of the inflight RPCs.
1733          */
1734         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1735                 min_xid = ptlrpc_known_replied_xid(imp);
1736         spin_unlock(&imp->imp_lock);
1737
1738         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1739
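             /* the sender's PID is carried in the request status field and is
              * printed as the pid in the RPC trace line below */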
1740         lustre_msg_set_status(req->rq_reqmsg, current->pid);
1741
1742         /* If the request to be sent is an LDLM callback, do not try to
1743          * refresh context.
1744          * An LDLM callback is sent by a server to a client in order to make
1745          * it release a lock, on a communication channel that uses a reverse
1746          * context. It cannot be refreshed on its own, as it is the 'reverse'
1747          * (server-side) representation of a client context.
1748          * We do not care if the reverse context is expired, and want to send
1749          * the LDLM callback anyway. Once the client receives the AST, it is
1750          * its job to refresh its own context if it has expired, hence
1751          * refreshing the associated reverse context on server side, before
1752          * being able to send the LDLM_CANCEL requested by the server.
1753          */
1754         if (lustre_msg_get_opc(req->rq_reqmsg) != LDLM_BL_CALLBACK &&
1755             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_CP_CALLBACK &&
1756             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_GL_CALLBACK)
1757                 rc = sptlrpc_req_refresh_ctx(req, 0);
1758         if (rc) {
1759                 if (req->rq_err) {
1760                         req->rq_status = rc;
1761                         RETURN(1);
1762                 } else {
1763                         spin_lock(&req->rq_lock);
1764                         req->rq_wait_ctx = 1;
1765                         spin_unlock(&req->rq_lock);
1766                         RETURN(0);
1767                 }
1768         }
1769
1770         CDEBUG(D_RPCTRACE,
1771                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1772                req, current->comm,
1773                imp->imp_obd->obd_uuid.uuid,
1774                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1775                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1776                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1777
1778         rc = ptl_send_rpc(req, 0);
1779         if (rc == -ENOMEM) {
1780                 spin_lock(&imp->imp_lock);
1781                 if (!list_empty(&req->rq_list)) {
1782                         list_del_init(&req->rq_list);
1783                         if (atomic_dec_and_test(&req->rq_import->imp_inflight))
1784                                 wake_up(&req->rq_import->imp_recovery_waitq);
1785                 }
1786                 spin_unlock(&imp->imp_lock);
1787                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1788                 RETURN(rc);
1789         }
1790         if (rc) {
1791                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1792                           rc);
1793                 spin_lock(&req->rq_lock);
1794                 req->rq_net_err = 1;
1795                 spin_unlock(&req->rq_lock);
1796                 RETURN(rc);
1797         }
1798         RETURN(0);
1799 }
1800
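     /**
      * Ask the set's producer callback for more requests until the set has
      * set_max_inflight RPCs in flight. Returns the number of requests added
      * during this call, or 0 once the producer reports -ENOENT (no more
      * work to produce).
      */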
1801 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1802 {
1803         int remaining, rc;
1804
1805         ENTRY;
1806         LASSERT(set->set_producer != NULL);
1807
1808         remaining = atomic_read(&set->set_remaining);
1809
1810         /*
1811          * populate the ->set_requests list with requests until we
1812          * reach the maximum number of RPCs in flight for this set
1813          */
1814         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1815                 rc = set->set_producer(set, set->set_producer_arg);
1816                 if (rc == -ENOENT) {
1817                         /* no more RPC to produce */
1818                         set->set_producer     = NULL;
1819                         set->set_producer_arg = NULL;
1820                         RETURN(0);
1821                 }
1822         }
1823
1824         RETURN((atomic_read(&set->set_remaining) - remaining));
1825 }
1826
1827 /**
1828  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1829  * and no more replies are expected.
1830  * (It is possible to get fewer replies than requests sent, e.g. due to
1831  * timed-out requests or requests that we had trouble sending out.)
1832  *
1833  * NOTE: This function contains a potential schedule point (cond_resched()).
1834  */
1835 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1836 {
1837         struct ptlrpc_request *req, *next;
1838         LIST_HEAD(comp_reqs);
1839         int force_timer_recalc = 0;
1840
1841         ENTRY;
1842         if (atomic_read(&set->set_remaining) == 0)
1843                 RETURN(1);
1844
1845         list_for_each_entry_safe(req, next, &set->set_requests,
1846                                  rq_set_chain) {
1847                 struct obd_import *imp = req->rq_import;
1848                 int unregistered = 0;
1849                 int async = 1;
1850                 int rc = 0;
1851
1852                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1853                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1854                         continue;
1855                 }
1856
1857                 /*
1858                  * This schedule point is mainly for the ptlrpcd caller of this
1859                  * function.  Most ptlrpc sets are neither long-lived nor
1860                  * unbounded in length, but the set used by ptlrpcd is both.
1861                  * Since the processing time is unbounded, we need to insert an
1862                  * explicit schedule point to make the thread well-behaved.
1863                  */
1864                 cond_resched();
1865
1866                 /*
1867                  * If the caller allows this request to be interrupted and it
1868                  * has indeed been interrupted, then move the request to the
1869                  * RQ_PHASE_INTERPRET phase regardless of what its current
1870                  * phase is.
1871                  */
1872                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1873                         req->rq_status = -EINTR;
1874                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1875
1876                         /*
1877                          * Since it will be interpreted and we have to wait
1878                          * for the reply to be unlinked, use sync mode.
1879                          */
1880                         async = 0;
1881
1882                         GOTO(interpret, req->rq_status);
1883                 }
1884
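                     /* kick off brand-new requests; a non-zero result means
                      * the set's timeouts need to be recalculated */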
1885                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1886                         force_timer_recalc = 1;
1887
1888                 /* delayed send - skip */
1889                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1890                         continue;
1891
1892                 /* delayed resend - skip */
1893                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1894                     req->rq_sent > ktime_get_real_seconds())
1895                         continue;
1896
1897                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1898                       req->rq_phase == RQ_PHASE_BULK ||
1899                       req->rq_phase == RQ_PHASE_INTERPRET ||
1900                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1901                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1902                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1903                         LBUG();
1904                 }
1905
1906                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1907                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1908                         LASSERT(req->rq_next_phase != req->rq_phase);
1909                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1910
1911                         if (req->rq_req_deadline &&
1912                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1913                                 req->rq_req_deadline = 0;
1914                         if (req->rq_reply_deadline &&
1915                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1916                                 req->rq_reply_deadline = 0;
1917                         if (req->rq_bulk_deadline &&
1918                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1919                                 req->rq_bulk_deadline = 0;
1920
1921                         /*
1922                          * Skip processing until reply is unlinked. We
1923                          * can't return to pool before that and we can't
1924                          * call interpret before that. We need to make
1925                          * sure that all rdma transfers finished and will
1926                          * not corrupt any data.
1927                          */
1928                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1929                             ptlrpc_cli_wait_unlink(req))
1930                                 continue;
1931                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1932                             ptlrpc_client_bulk_active(req))
1933                                 continue;
1934
1935                         /*
1936                          * Turn fail_loc off to prevent it from looping
1937                          * forever.
1938                          */
1939                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1940                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1941                                                      CFS_FAIL_ONCE);
1942                         }
1943                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1944                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1945                                                      CFS_FAIL_ONCE);
1946                         }
1947
1948                         /*
1949                          * Move to next phase if reply was successfully
1950                          * unlinked.
1951                          */
1952                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1953                 }
1954
1955                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1956                         GOTO(interpret, req->rq_status);
1957
1958                 /*
1959                  * Note that this also will start async reply unlink.
1960                  */
1961                 if (req->rq_net_err && !req->rq_timedout) {
1962                         ptlrpc_expire_one_request(req, 1);
1963
1964                         /*
1965                          * Check if we still need to wait for unlink.
1966                          */
1967                         if (ptlrpc_cli_wait_unlink(req) ||
1968                             ptlrpc_client_bulk_active(req))
1969                                 continue;
1970                         /* If there is no need to resend, fail it now. */
1971                         if (req->rq_no_resend) {
1972                                 if (req->rq_status == 0)
1973                                         req->rq_status = -EIO;
1974                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1975                                 GOTO(interpret, req->rq_status);
1976                         } else {
1977                                 continue;
1978                         }
1979                 }
1980
1981                 if (req->rq_err) {
1982                         if (!ptlrpc_unregister_reply(req, 1)) {
1983                                 ptlrpc_unregister_bulk(req, 1);
1984                                 continue;
1985                         }
1986
1987                         spin_lock(&req->rq_lock);
1988                         req->rq_replied = 0;
1989                         spin_unlock(&req->rq_lock);
1990                         if (req->rq_status == 0)
1991                                 req->rq_status = -EIO;
1992                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1993                         GOTO(interpret, req->rq_status);
1994                 }
1995
1996                 /*
1997                  * ptlrpc_set_wait uses l_wait_event_abortable_timeout()
1998                  * so it sets rq_intr regardless of individual rpc
1999                  * timeouts. The synchronous IO waiting path sets
2000                  * rq_intr irrespective of whether ptlrpcd
2001                  * has seen a timeout.  Our policy is to only interpret
2002                  * interrupted rpcs after they have timed out, so we
2003                  * need to enforce that here.
2004                  */
2005
2006                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
2007                                      req->rq_wait_ctx)) {
2008                         req->rq_status = -EINTR;
2009                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2010                         GOTO(interpret, req->rq_status);
2011                 }
2012
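                     /* requests still in the RPC phase: resend or wait as
                      * needed, then check whether a reply has arrived */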
2013                 if (req->rq_phase == RQ_PHASE_RPC) {
2014                         if (req->rq_timedout || req->rq_resend ||
2015                             req->rq_waiting || req->rq_wait_ctx) {
2016                                 int status;
2017
2018                                 if (!ptlrpc_unregister_reply(req, 1)) {
2019                                         ptlrpc_unregister_bulk(req, 1);
2020                                         continue;
2021                                 }
2022
2023                                 spin_lock(&imp->imp_lock);
2024                                 if (ptlrpc_import_delay_req(imp, req,
2025                                                             &status)) {
2026                                         /*
2027                                          * put on the delayed list: wait for
2028                                          * recovery to finish before sending
2029                                          */
2030                                         list_move_tail(&req->rq_list,
2031                                                        &imp->imp_delayed_list);
2032                                         spin_unlock(&imp->imp_lock);
2033                                         continue;
2034                                 }
2035
2036                                 if (status != 0)  {
2037                                         req->rq_status = status;
2038                                         ptlrpc_rqphase_move(req,
2039                                                             RQ_PHASE_INTERPRET);
2040                                         spin_unlock(&imp->imp_lock);
2041                                         GOTO(interpret, req->rq_status);
2042                                 }
2043                                 /* fail no-resend requests with -ENOTCONN, except on just-initiated connections */
2044                                 if (ptlrpc_no_resend(req) &&
2045                                     !req->rq_wait_ctx &&
2046                                     imp->imp_generation !=
2047                                     imp->imp_initiated_at) {
2048                                         req->rq_status = -ENOTCONN;
2049                                         ptlrpc_rqphase_move(req,
2050                                                             RQ_PHASE_INTERPRET);
2051                                         spin_unlock(&imp->imp_lock);
2052                                         GOTO(interpret, req->rq_status);
2053                                 }
2054
2055                                 /* don't resend too fast in case of network
2056                                  * errors.
2057                                  */
2058                                 if (ktime_get_real_seconds() < (req->rq_sent + 1)
2059                                     && req->rq_net_err && req->rq_timedout) {
2060
2061                                         DEBUG_REQ(D_INFO, req,
2062                                                   "throttle request");
2063                                         /* Don't try to resend RPC right away
2064                                          * as it is likely it will fail again
2065                                          * and ptlrpc_check_set() will be
2066                                          * called again, keeping this thread
2067                                          * busy. Instead, wait for the next
2068                                          * timeout. Flag it as resend to
2069                                          * ensure we don't wait too long.
2070                                          */
2071                                         req->rq_resend = 1;
2072                                         spin_unlock(&imp->imp_lock);
2073                                         continue;
2074                                 }
2075
2076                                 list_move_tail(&req->rq_list,
2077                                                &imp->imp_sending_list);
2078
2079                                 spin_unlock(&imp->imp_lock);
2080
2081                                 spin_lock(&req->rq_lock);
2082                                 req->rq_waiting = 0;
2083                                 spin_unlock(&req->rq_lock);
2084
2085                                 if (req->rq_timedout || req->rq_resend) {
2086                                         /*
2087                                          * This is being re-sent anyway,
2088                                          * so mark the request for resend.
2089                                          */
2090                                         spin_lock(&req->rq_lock);
2091                                         req->rq_resend = 1;
2092                                         spin_unlock(&req->rq_lock);
2093                                 }
2094                                 /*
2095                                  * rq_wait_ctx is only touched by ptlrpcd,
2096                                  * so no lock is needed here.
2097                                  */
2098                                 status = sptlrpc_req_refresh_ctx(req, 0);
2099                                 if (status) {
2100                                         if (req->rq_err) {
2101                                                 req->rq_status = status;
2102                                                 spin_lock(&req->rq_lock);
2103                                                 req->rq_wait_ctx = 0;
2104                                                 spin_unlock(&req->rq_lock);
2105                                                 force_timer_recalc = 1;
2106                                         } else {
2107                                                 spin_lock(&req->rq_lock);
2108                                                 req->rq_wait_ctx = 1;
2109                                                 spin_unlock(&req->rq_lock);
2110                                         }
2111
2112                                         continue;
2113                                 } else {
2114                                         spin_lock(&req->rq_lock);
2115                                         req->rq_wait_ctx = 0;
2116                                         spin_unlock(&req->rq_lock);
2117                                 }
2118
2119                                 /*
2120                                  * In any case, the previous bulk should be
2121                                  * cleaned up before the new send
2122                                  */
2123                                 if (req->rq_bulk &&
2124                                     !ptlrpc_unregister_bulk(req, 1))
2125                                         continue;
2126
2127                                 rc = ptl_send_rpc(req, 0);
2128                                 if (rc == -ENOMEM) {
2129                                         spin_lock(&imp->imp_lock);
2130                                         if (!list_empty(&req->rq_list)) {
2131                                                 list_del_init(&req->rq_list);
2132                                                 if (atomic_dec_and_test(&imp->imp_inflight))
2133                                                         wake_up(&imp->imp_recovery_waitq);
2134                                         }
2135                                         spin_unlock(&imp->imp_lock);
2136                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2137                                         continue;
2138                                 }
2139                                 if (rc) {
2140                                         DEBUG_REQ(D_HA, req,
2141                                                   "send failed: rc = %d", rc);
2142                                         force_timer_recalc = 1;
2143                                         spin_lock(&req->rq_lock);
2144                                         req->rq_net_err = 1;
2145                                         spin_unlock(&req->rq_lock);
2146                                         continue;
2147                                 }
2148                                 /* need to reset the timeout */
2149                                 force_timer_recalc = 1;
2150                         }
2151
2152                         spin_lock(&req->rq_lock);
2153
2154                         if (ptlrpc_client_early(req)) {
2155                                 ptlrpc_at_recv_early_reply(req);
2156                                 spin_unlock(&req->rq_lock);
2157                                 continue;
2158                         }
2159
2160                         /* Still waiting for a reply? */
2161                         if (ptlrpc_client_recv(req)) {
2162                                 spin_unlock(&req->rq_lock);
2163                                 continue;
2164                         }
2165
2166                         /* Did we actually receive a reply? */
2167                         if (!ptlrpc_client_replied(req)) {
2168                                 spin_unlock(&req->rq_lock);
2169                                 continue;
2170                         }
2171
2172                         spin_unlock(&req->rq_lock);
2173
2174                         /*
2175                          * unlink from the net because we are going to
2176                          * swab the reply buffer in place
2177                          */
2178                         unregistered = ptlrpc_unregister_reply(req, 1);
2179                         if (!unregistered)
2180                                 continue;
2181
2182                         req->rq_status = after_reply(req);
2183                         if (req->rq_resend) {
2184                                 force_timer_recalc = 1;
2185                                 continue;
2186                         }
2187
2188                         /*
2189                          * If there is no bulk associated with this request,
2190                          * then we're done and should let the interpreter
2191                          * process the reply. Similarly if the RPC returned
2192                          * an error, and therefore the bulk will never arrive.
2193                          */
2194                         if (!req->rq_bulk || req->rq_status < 0) {
2195                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2196                                 GOTO(interpret, req->rq_status);
2197                         }
2198
2199                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2200                 }
2201
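                     /* only requests in the BULK phase reach this point: wait
                      * for the bulk transfer to finish before interpreting */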
2202                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2203                 if (ptlrpc_client_bulk_active(req))
2204                         continue;
2205
2206                 if (req->rq_bulk->bd_failure) {
2207                         /*
2208                          * The RPC reply arrived OK, but the bulk screwed
2209                          * up!  Dead weird since the server told us the RPC
2210                          * was good after getting the REPLY for her GET or
2211                          * the ACK for her PUT.
2212                          */
2213                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed %d/%d/%d",
2214                                   req->rq_status,
2215                                   req->rq_bulk->bd_nob,
2216                                   req->rq_bulk->bd_nob_transferred);
2217                         req->rq_status = -EIO;
2218                 }
2219
2220                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2221
2222 interpret:
2223                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2224
2225                 /*
2226                  * This may move the request to the unregistering phase; we
2227                  * need to wait for the reply to be unlinked.
2228                  */
2229                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2230                         /* start async bulk unlink too */
2231                         ptlrpc_unregister_bulk(req, 1);
2232                         continue;
2233                 }
2234
2235                 if (!ptlrpc_unregister_bulk(req, async))
2236                         continue;
2237
2238                 /*
2239                  * By the time interpret is called, receiving must already
2240                  * be finished.
2241                  */
2242                 LASSERT(!req->rq_receiving_reply);
2243
2244                 ptlrpc_req_interpret(env, req, req->rq_status);
2245
2246                 if (ptlrpcd_check_work(req)) {
2247                         atomic_dec(&set->set_remaining);
2248                         continue;
2249                 }
2250                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2251
2252                 if (req->rq_reqmsg)
2253                         CDEBUG(D_RPCTRACE,
2254                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2255                                req, current->comm,
2256                                imp->imp_obd->obd_uuid.uuid,
2257                                lustre_msg_get_status(req->rq_reqmsg),
2258                                req->rq_xid,
2259                                obd_import_nid2str(imp),
2260                                lustre_msg_get_opc(req->rq_reqmsg),
2261                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2262
2263                 spin_lock(&imp->imp_lock);
2264                 /*
2265                  * The request may no longer be on the sending or delayed
2266                  * list. This happens when it was marked erroneous because
2267                  * ptlrpc_import_delay_req(req, status) found it impossible
2268                  * to allow sending this RPC and returned *status != 0.
2269                  */
2270                 if (!list_empty(&req->rq_list)) {
2271                         list_del_init(&req->rq_list);
2272                         if (atomic_dec_and_test(&imp->imp_inflight))
2273                                 wake_up(&imp->imp_recovery_waitq);
2274                 }
2275                 list_del_init(&req->rq_unreplied_list);
2276                 spin_unlock(&imp->imp_lock);
2277
2278                 atomic_dec(&set->set_remaining);
2279                 wake_up(&imp->imp_recovery_waitq);
2280
2281                 if (set->set_producer) {
2282                         /* produce a new request if possible */
2283                         if (ptlrpc_set_producer(set) > 0)
2284                                 force_timer_recalc = 1;
2285
2286                         /*
2287                          * free the request that has just been completed
2288                          * in order not to pollute set->set_requests
2289                          */
2290                         list_del_init(&req->rq_set_chain);
2291                         spin_lock(&req->rq_lock);
2292                         req->rq_set = NULL;
2293                         req->rq_invalid_rqset = 0;
2294                         spin_unlock(&req->rq_lock);
2295
2296                         /* record rq_status to compute the final status later */
2297                         if (req->rq_status != 0)
2298                                 set->set_rc = req->rq_status;
2299                         ptlrpc_req_finished(req);
2300                 } else {
2301                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2302                 }
2303         }
2304
2305         /*
2306          * move completed requests to the head of the list so it is easier
2307          * for the caller to find them
2308          */
2309         list_splice(&comp_reqs, &set->set_requests);
2310
2311         /* If we hit an error, we want to recover promptly. */
2312         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2313 }
2314 EXPORT_SYMBOL(ptlrpc_check_set);
2315
2316 /**
2317  * Time out request \a req. If \a async_unlink is set, do not wait
2318  * until LNet actually confirms network buffer unlinking.
2319  * Return 1 if we should give up further retrying attempts or 0 otherwise.
2320  */
2321 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2322 {
2323         struct obd_import *imp = req->rq_import;
2324         unsigned int debug_mask = D_RPCTRACE;
2325         int rc = 0;
2326         __u32 opc;
2327
2328         ENTRY;
2329         spin_lock(&req->rq_lock);
2330         req->rq_timedout = 1;
2331         spin_unlock(&req->rq_lock);
2332
2333         opc = lustre_msg_get_opc(req->rq_reqmsg);
2334         if (ptlrpc_console_allow(req, opc,
2335                                  lustre_msg_get_status(req->rq_reqmsg)))
2336                 debug_mask = D_WARNING;
2337         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2338                   req->rq_net_err ? "failed due to network error" :
2339                      ((req->rq_real_sent == 0 ||
2340                        req->rq_real_sent < req->rq_sent ||
2341                        req->rq_real_sent >= req->rq_deadline) ?
2342                       "timed out for sent delay" : "timed out for slow reply"),
2343                   req->rq_sent, req->rq_real_sent);
2344
2345         if (imp && obd_debug_peer_on_timeout)
2346                 LNetDebugPeer(&imp->imp_connection->c_peer);
2347
2348         ptlrpc_unregister_reply(req, async_unlink);
2349         ptlrpc_unregister_bulk(req, async_unlink);
2350
2351         if (obd_dump_on_timeout)
2352                 libcfs_debug_dumplog();
2353
2354         if (!imp) {
2355                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2356                 RETURN(1);
2357         }
2358
2359         atomic_inc(&imp->imp_timeouts);
2360
2361         /* The DLM server doesn't want recovery run on its imports. */
2362         if (imp->imp_dlm_fake)
2363                 RETURN(1);
2364
2365         /*
2366          * If this request is for recovery or other primordial tasks,
2367          * then error it out here.
2368          */
2369         if (req->rq_ctx_init || req->rq_ctx_fini ||
2370             req->rq_send_state != LUSTRE_IMP_FULL ||
2371             imp->imp_obd->obd_no_recov) {
2372                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2373                           ptlrpc_import_state_name(req->rq_send_state),
2374                           ptlrpc_import_state_name(imp->imp_state));
2375                 spin_lock(&req->rq_lock);
2376                 req->rq_status = -ETIMEDOUT;
2377                 req->rq_err = 1;
2378                 spin_unlock(&req->rq_lock);
2379                 RETURN(1);
2380         }
2381
2382         /*
2383          * if a request can't be resent we can't wait for an answer after
2384          * the timeout
2385          */
2386         if (ptlrpc_no_resend(req)) {
2387                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2388                 rc = 1;
2389         }
2390
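             /* skip failing the import for a timed-out ping when replies with
              * equal or higher XIDs have already been seen, i.e. the
              * connection evidently still works */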
2391         if (opc != OBD_PING || req->rq_xid > imp->imp_highest_replied_xid)
2392                 ptlrpc_fail_import(imp,
2393                                    lustre_msg_get_conn_cnt(req->rq_reqmsg));
2394
2395         RETURN(rc);
2396 }
2397
2398 /**
2399  * Time out all uncompleted requests in request set \a set.
2400  * This is called when a wait times out.
2401  */
2402 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
2403 {
2404         struct ptlrpc_request *req;
2405         time64_t now = ktime_get_real_seconds();
2406
2407         ENTRY;
2408         LASSERT(set != NULL);
2409
2410         /*
2411          * A timeout expired. See which reqs it applies to...
2412          */
2413         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2414                 /* don't expire request waiting for context */
2415                 if (req->rq_wait_ctx)
2416                         continue;
2417
2418                 /* Request in-flight? */
2419                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2420                        !req->rq_waiting && !req->rq_resend) ||
2421                       (req->rq_phase == RQ_PHASE_BULK)))
2422                         continue;
2423
2424                 if (req->rq_timedout ||     /* already dealt with */
2425                     req->rq_deadline > now) /* not expired */
2426                         continue;
2427
2428                 /*
2429                  * Deal with this request. Do it asynchronously so as not
2430                  * to block the ptlrpcd thread.
2431                  */
2432                 ptlrpc_expire_one_request(req, 1);
2433                 /*
2434                  * Loops require that we resched once in a while to avoid
2435                  * RCU stalls and a few other problems.
2436                  */
2437                 cond_resched();
2438
2439         }
2440 }
2441
2442 /**
2443  * Interrupts (sets the interrupted flag on) all uncompleted requests in
2444  * set \a set. This is called when a wait_event is interrupted
2445  * by a signal.
2446  */
2447 static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
2448 {
2449         struct ptlrpc_request *req;
2450
2451         LASSERT(set != NULL);
2452         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2453
2454         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2455                 if (req->rq_intr)
2456                         continue;
2457
2458                 if (req->rq_phase != RQ_PHASE_RPC &&
2459                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2460                     !req->rq_allow_intr)
2461                         continue;
2462
2463                 spin_lock(&req->rq_lock);
2464                 req->rq_intr = 1;
2465                 spin_unlock(&req->rq_lock);
2466         }
2467 }
2468
2469 /**
2470  * Get the smallest timeout in the set; this does NOT set a timeout.
2471  */
2472 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2473 {
2474         time64_t now = ktime_get_real_seconds();
2475         int timeout = 0;
2476         struct ptlrpc_request *req;
2477         time64_t deadline;
2478
2479         ENTRY;
2480         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2481                 /* Request in-flight? */
2482                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2483                       (req->rq_phase == RQ_PHASE_BULK) ||
2484                       (req->rq_phase == RQ_PHASE_NEW)))
2485                         continue;
2486
2487                 /* Already timed out. */
2488                 if (req->rq_timedout)
2489                         continue;
2490
2491                 /* Waiting for ctx. */
2492                 if (req->rq_wait_ctx)
2493                         continue;
2494
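                     /* new and delayed-resend requests become due at rq_sent;
                      * everything else expires at rq_sent + rq_timeout */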
2495                 if (req->rq_phase == RQ_PHASE_NEW)
2496                         deadline = req->rq_sent;
2497                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2498                         deadline = req->rq_sent;
2499                 else
2500                         deadline = req->rq_sent + req->rq_timeout;
2501
2502                 if (deadline <= now)    /* actually expired already */
2503                         timeout = 1;    /* ASAP */
2504                 else if (timeout == 0 || timeout > deadline - now)
2505                         timeout = deadline - now;
2506         }
2507         RETURN(timeout);
2508 }
2509
2510 /**
2511  * Send all unsent requests from the set and then wait until all
2512  * requests in the set complete (either get a reply, time out, get an
2513  * error or otherwise be interrupted).
2514  * Returns 0 on success or error code otherwise.
2515  */
2516 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2517 {
2518         struct ptlrpc_request *req;
2519         time64_t timeout;
2520         int rc;
2521
2522         ENTRY;
2523         if (set->set_producer)
2524                 (void)ptlrpc_set_producer(set);
2525         else
2526                 list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2527                         if (req->rq_phase == RQ_PHASE_NEW)
2528                                 (void)ptlrpc_send_new_req(req);
2529                 }
2530
2531         if (list_empty(&set->set_requests))
2532                 RETURN(0);
2533
2534         do {
2535                 timeout = ptlrpc_set_next_timeout(set);
2536
2537                 /*
2538                  * wait until all complete, interrupted, or an in-flight
2539                  * req times out
2540                  */
2541                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2542                        set, timeout);
2543
2544                 if ((timeout == 0 && !signal_pending(current)) ||
2545                     set->set_allow_intr) {
2546                         /*
2547                          * No requests are in flight (all either timed out
2548                          * or delayed), so we can allow interrupts.
2549                          * We still want to block for a limited time,
2550                          * so we allow interrupts during the timeout.
2551                          */
2552                         rc = l_wait_event_abortable_timeout(
2553                                 set->set_waitq,
2554                                 ptlrpc_check_set(NULL, set),
2555                                 cfs_time_seconds(timeout ? timeout : 1));
2556                         if (rc == 0) {
2557                                 rc = -ETIMEDOUT;
2558                                 ptlrpc_expired_set(set);
2559                         } else if (rc < 0) {
2560                                 rc = -EINTR;
2561                                 ptlrpc_interrupted_set(set);
2562                         } else {
2563                                 rc = 0;
2564                         }
2565                 } else {
2566                         /*
2567                          * At least one request is in flight, so no
2568                          * interrupts are allowed. Wait until all
2569                          * complete, or an in-flight req times out.
2570                          */
2571                         rc = wait_event_idle_timeout(
2572                                 set->set_waitq,
2573                                 ptlrpc_check_set(NULL, set),
2574                                 cfs_time_seconds(timeout ? timeout : 1));
2575                         if (rc == 0) {
2576                                 ptlrpc_expired_set(set);
2577                                 rc = -ETIMEDOUT;
2578                         } else {
2579                                 rc = 0;
2580                         }
2581
2582                         /*
2583                          * LU-769 - if we ignored the signal because
2584                          * it was already pending when we started, we
2585                          * need to handle it now or we risk it being
2586                          * ignored forever
2587                          */
2588                         if (rc == -ETIMEDOUT &&
2589                             signal_pending(current)) {
2590                                 sigset_t old, new;
2591
2592                                 siginitset(&new, LUSTRE_FATAL_SIGS);
2593                                 sigprocmask(SIG_BLOCK, &new, &old);
2594                                 /*
2595                                  * In fact we only interrupt for the
2596                                  * "fatal" signals like SIGINT or
2597                                  * SIGKILL. We still ignore less
2598                                  * important signals since ptlrpc set
2599                                  * is not easily reentrant from
2600                                  * userspace again
2601                                  */
2602                                 if (signal_pending(current))
2603                                         ptlrpc_interrupted_set(set);
2604                                 sigprocmask(SIG_SETMASK, &old, NULL);
2605                         }
2606                 }
2607
2608                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2609
2610                 /*
2611                  * -EINTR => all requests have been flagged rq_intr so next
2612                  * check completes.
2613                  * -ETIMEDOUT => someone timed out.  When all reqs have
2614                  * timed out, signals are enabled allowing completion with
2615                  * EINTR.
2616                  * I don't really care if we go once more round the loop in
2617                  * the error cases -eeb.
2618                  */
2619                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2620                         list_for_each_entry(req, &set->set_requests,
2621                                             rq_set_chain) {
2622                                 spin_lock(&req->rq_lock);
2623                                 req->rq_invalid_rqset = 1;
2624                                 spin_unlock(&req->rq_lock);
2625                         }
2626                 }
2627         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2628
2629         LASSERT(atomic_read(&set->set_remaining) == 0);
2630
2631         rc = set->set_rc; /* rq_status of already freed requests if any */
2632         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2633                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2634                 if (req->rq_status != 0)
2635                         rc = req->rq_status;
2636         }
2637
2638         RETURN(rc);
2639 }
2640 EXPORT_SYMBOL(ptlrpc_set_wait);
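
     /*
      * Minimal synchronous usage sketch (error handling omitted; the helpers
      * shown here are the set primitives provided elsewhere in ptlrpc):
      *
      *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
      *
      *      if (set != NULL) {
      *              ptlrpc_set_add_req(set, req);
      *              rc = ptlrpc_set_wait(env, set);
      *              ptlrpc_set_destroy(set);
      *      }
      */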
2641
2642 /**
2643  * Helper function for request freeing.
2644  * Called when the request refcount reaches zero and the request needs to be freed.
2645  * Removes the request from all sending/replay lists it might be on and
2646  * frees network buffers if any are present.
2647  * If \a locked is set, the caller is already holding the import imp_lock,
2648  * so we do not need to reobtain it (for certain list manipulations).
2649  */
2650 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2651 {
2652         ENTRY;
2653
2654         if (!request)
2655                 RETURN_EXIT;
2656
2657         LASSERT(!request->rq_srv_req);
2658         LASSERT(request->rq_export == NULL);
2659         LASSERTF(!request->rq_receiving_reply, "req %px\n", request);
2660         LASSERTF(list_empty(&request->rq_list), "req %px\n", request);
2661         LASSERTF(list_empty(&request->rq_set_chain), "req %px\n", request);
2662         LASSERTF(!request->rq_replay, "req %px\n", request);
2663
2664         req_capsule_fini(&request->rq_pill);
2665
2666         /*
2667          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2668          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2669          */
2670         if (request->rq_import) {
2671                 if (!locked)
2672                         spin_lock(&request->rq_import->imp_lock);
2673                 list_del_init(&request->rq_replay_list);
2674                 list_del_init(&request->rq_unreplied_list);
2675                 if (!locked)
2676                         spin_unlock(&request->rq_import->imp_lock);
2677         }
2678         LASSERTF(list_empty(&request->rq_replay_list), "req %px\n", request);
2679
2680         if (atomic_read(&request->rq_refcount) != 0) {
2681                 DEBUG_REQ(D_ERROR, request,
2682                           "freeing request with nonzero refcount");
2683                 LBUG();
2684         }
2685
2686         if (request->rq_repbuf)
2687                 sptlrpc_cli_free_repbuf(request);
2688
2689         if (request->rq_import) {
2690                 if (!ptlrpcd_check_work(request)) {
2691                         LASSERT(atomic_read(&request->rq_import->imp_reqs) > 0);
2692                         atomic_dec(&request->rq_import->imp_reqs);
2693                 }
2694                 class_import_put(request->rq_import);
2695                 request->rq_import = NULL;
2696         }
2697         if (request->rq_bulk)
2698                 ptlrpc_free_bulk(request->rq_bulk);
2699
2700         if (request->rq_reqbuf || request->rq_clrbuf)
2701                 sptlrpc_cli_free_reqbuf(request);
2702
2703         if (request->rq_cli_ctx)
2704                 sptlrpc_req_put_ctx(request, !locked);
2705
2706         if (request->rq_pool)
2707                 __ptlrpc_free_req_to_pool(request);
2708         else
2709                 ptlrpc_request_cache_free(request);
2710         EXIT;
2711 }
2712
2713 /**
2714  * Helper function.
2715  * Drops one reference for request \a request.
2716  * \a locked set indicates that the caller holds the import imp_lock.
2717  * Frees the request when the reference count reaches zero.
2718  *
2719  * \retval 1    the request is freed
2720  * \retval 0    other users still hold references on the request
2721  */
2722 static int __ptlrpc_req_put(struct ptlrpc_request *request, int locked)
2723 {
2724         int count;
2725
2726         ENTRY;
2727         if (!request)
2728                 RETURN(1);
2729
2730         LASSERT(request != LP_POISON);
2731         LASSERT(request->rq_reqmsg != LP_POISON);
2732
2733         DEBUG_REQ(D_INFO, request, "refcount now %u",
2734                   atomic_read(&request->rq_refcount) - 1);
2735
2736         spin_lock(&request->rq_lock);
2737         count = atomic_dec_return(&request->rq_refcount);
2738         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2739
2740         /*
2741          * For an open RPC, the client does not know the EA size (LOV, ACL,
2742          * and so on) until the reply arrives, so it has to reserve a very
2743          * large reply buffer that is not released until the RPC is freed.
2744          * Since the open RPC is replayable, we need to keep it on the
2745          * replay list until close. If a lot of files are opened
2746          * concurrently, the client may run out of memory.
2747          *
2748          * In fact, it is unnecessary to keep the reply buffer for open
2749          * replay: the related EAs have already been saved via
2750          * mdc_save_lovea() before coming here, so it is safe to free the
2751          * reply buffer earlier, before releasing the RPC. LU-9514
2752          */
2753         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2754                 spin_lock(&request->rq_early_free_lock);
2755                 sptlrpc_cli_free_repbuf(request);
2756                 request->rq_repbuf = NULL;
2757                 request->rq_repbuf_len = 0;
2758                 request->rq_repdata = NULL;
2759                 request->rq_reqdata_len = 0;
2760                 spin_unlock(&request->rq_early_free_lock);
2761         }
2762         spin_unlock(&request->rq_lock);
2763
2764         if (!count)
2765                 __ptlrpc_free_req(request, locked);
2766
2767         RETURN(!count);
2768 }
2769
2770 /**
2771  * Drop one request reference. Must be called with import imp_lock held.
2772  * When reference count drops to zero, request is freed.
2773  */
2774 void ptlrpc_req_put_with_imp_lock(struct ptlrpc_request *request)
2775 {
2776         assert_spin_locked(&request->rq_import->imp_lock);
2777         (void)__ptlrpc_req_put(request, 1);
2778 }
2779
2780 /**
2781  * Drops one reference count for a request.
2782  */
2783 void ptlrpc_req_put(struct ptlrpc_request *request)
2784 {
2785         __ptlrpc_req_put(request, 0);
2786 }
2787 EXPORT_SYMBOL(ptlrpc_req_put);
2788
2789
2790 /**
2791  * Returns the xid of \a request
2792  */
2793 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2794 {
2795         return request->rq_xid;
2796 }
2797 EXPORT_SYMBOL(ptlrpc_req_xid);
2798
2799 /**
2800  * Disengage the client's reply buffer from the network
2801  * NB does _NOT_ unregister any client-side bulk.
2802  * IDEMPOTENT, but _not_ safe against concurrent callers.
2803  * The request owner (i.e. the thread doing the I/O) must call...
2804  * Returns 1 once the reply buffer is unlinked, 0 if the unlink is still in progress (async).
2805  */
2806 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2807 {
2808         bool discard = false;
2809         /*
2810          * Might sleep.
2811          */
2812         LASSERT(!in_interrupt());
2813
2814         /* Let's set up a deadline for the reply unlink. */
2815         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2816             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2817                 request->rq_reply_deadline = ktime_get_real_seconds() +
2818                                              PTLRPC_REQ_LONG_UNLINK;
2819
2820         /*
2821          * Nothing left to do.
2822          */
2823         if (!__ptlrpc_cli_wait_unlink(request, &discard))
2824                 RETURN(1);
2825
2826         LNetMDUnlink(request->rq_reply_md_h);
2827
2828         if (discard) /* Discard the request-out callback */
2829                 __LNetMDUnlink(request->rq_req_md_h, discard);
2830
2831         /*
2832          * Let's check it once again.
2833          */
2834         if (!ptlrpc_cli_wait_unlink(request))
2835                 RETURN(1);
2836
2837         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2838         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2839
2840         /*
2841          * Do not wait for unlink to finish.
2842          */
2843         if (async)
2844                 RETURN(0);
2845
2846         /*
2847          * We have to wait_event_idle_timeout() whatever the result, to get
2848          * a chance to run reply_in_callback(), and to make sure we've
2849          * unlinked before returning a req to the pool.
2850          */
2851         for (;;) {
2852                 wait_queue_head_t *wq = (request->rq_set) ?
2853                                         &request->rq_set->set_waitq :
2854                                         &request->rq_reply_waitq;
2855                 int seconds = PTLRPC_REQ_LONG_UNLINK;
2856                 /*
2857                  * Network access will complete in finite time but the HUGE
2858                  * timeout lets us CWARN for visibility of sluggish NALs
2859                  */
2860                 while (seconds > 0 &&
2861                        wait_event_idle_timeout(
2862                                *wq,
2863                                !ptlrpc_cli_wait_unlink(request),
2864                                cfs_time_seconds(1)) == 0)
2865                         seconds -= 1;
2866                 if (seconds > 0) {
2867                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2868                         RETURN(1);
2869                 }
2870
2871                 DEBUG_REQ(D_WARNING, request,
2872                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2873                           request->rq_receiving_reply,
2874                           request->rq_req_unlinked,
2875                           request->rq_reply_unlinked);
2876         }
2877         RETURN(0);
2878 }
2879
2880 static void ptlrpc_free_request(struct ptlrpc_request *req)
2881 {
2882         spin_lock(&req->rq_lock);
2883         req->rq_replay = 0;
2884         spin_unlock(&req->rq_lock);
2885
2886         if (req->rq_commit_cb)
2887                 req->rq_commit_cb(req);
2888         list_del_init(&req->rq_replay_list);
2889
2890         __ptlrpc_req_put(req, 1);
2891 }
2892
2893 /**
2894  * the request is committed and dropped from the replay list of its import
2895  */
2896 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2897 {
2898         struct obd_import *imp = req->rq_import;
2899
2900         spin_lock(&imp->imp_lock);
2901         if (list_empty(&req->rq_replay_list)) {
2902                 spin_unlock(&imp->imp_lock);
2903                 return;
2904         }
2905
2906         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2907                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2908                         imp->imp_replay_cursor = req->rq_replay_list.next;
2909                 ptlrpc_free_request(req);
2910         }
2911
2912         spin_unlock(&imp->imp_lock);
2913 }
2914 EXPORT_SYMBOL(ptlrpc_request_committed);
2915
2916 /**
2917  * Iterates through the replay_list on the import and prunes
2918  * all requests that have a transno smaller than last_committed for the
2919  * import and do not have rq_replay set.
2920  * Since requests are sorted in transno order, it stops at the first
2921  * transno bigger than last_committed.
2922  * The caller must hold imp->imp_lock.
2923  */
2924 void ptlrpc_free_committed(struct obd_import *imp)
2925 {
2926         struct ptlrpc_request *req, *saved;
2927         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2928         bool skip_committed_list = true;
2929         unsigned int replay_scanned = 0, replay_freed = 0;
2930         unsigned int commit_scanned = 0, commit_freed = 0;
2931         unsigned int debug_level = D_INFO;
2932         __u64 peer_committed_transno;
2933         int imp_generation;
2934         time64_t start, now;
2935
2936         ENTRY;
2937         LASSERT(imp != NULL);
2938         assert_spin_locked(&imp->imp_lock);
2939
2940         start = ktime_get_seconds();
2941         /* save these here, we can potentially drop imp_lock after checking */
2942         peer_committed_transno = imp->imp_peer_committed_transno;
2943         imp_generation = imp->imp_generation;
2944
2945         if (peer_committed_transno == imp->imp_last_transno_checked &&
2946             imp_generation == imp->imp_last_generation_checked) {
2947                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2948                        imp->imp_obd->obd_name, peer_committed_transno);
2949                 RETURN_EXIT;
2950         }
2951         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2952                imp->imp_obd->obd_name, peer_committed_transno, imp_generation);
2953
2954         if (imp_generation != imp->imp_last_generation_checked ||
2955             imp->imp_last_transno_checked == 0)
2956                 skip_committed_list = false;
2957         /* maybe drop imp_lock here, if another lock protected the lists */
2958
2959         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2960                                  rq_replay_list) {
2961                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2962                 LASSERT(req != last_req);
2963                 last_req = req;
2964
2965                 if (req->rq_transno == 0) {
2966                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2967                         LBUG();
2968                 }
2969
2970                 /* If other threads are waiting on imp_lock, stop processing
2971                  * in this thread. Another thread can finish remaining work.
2972                  * This may happen if there are huge numbers of open files
2973                  * that are closed suddenly or evicted, or if the server
2974                  * commit interval is very high vs. RPC rate.
2975                  */
2976                 if (++replay_scanned % 2048 == 0) {
2977                         now = ktime_get_seconds();
2978                         if (now > start + 5)
2979                                 debug_level = D_WARNING;
2980
2981                         if ((replay_freed > 128 && now > start + 3) &&
2982                             atomic_read(&imp->imp_waiting)) {
2983                                 if (debug_level == D_INFO)
2984                                         debug_level = D_RPCTRACE;
2985                                 break;
2986                         }
2987                 }
2988
2989                 if (req->rq_import_generation < imp_generation) {
2990                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2991                         GOTO(free_req, 0);
2992                 }
2993
2994                 /* not yet committed */
2995                 if (req->rq_transno > peer_committed_transno) {
2996                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2997                         break;
2998                 }
2999
3000                 if (req->rq_replay) {
3001                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
3002                         list_move_tail(&req->rq_replay_list,
3003                                        &imp->imp_committed_list);
3004                         continue;
3005                 }
3006
3007                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
3008                           peer_committed_transno);
3009 free_req:
3010                 replay_freed++;
3011                 ptlrpc_free_request(req);
3012         }
3013
3014         if (skip_committed_list)
3015                 GOTO(out, 0);
3016
3017         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
3018                                  rq_replay_list) {
3019                 LASSERT(req->rq_transno != 0);
3020
3021                 /* If other threads are waiting on imp_lock, stop processing
3022                  * in this thread. Another thread can finish remaining work. */
3023                 if (++commit_scanned % 2048 == 0) {
3024                         now = ktime_get_seconds();
3025                         if (now > start + 6)
3026                                 debug_level = D_WARNING;
3027
3028                         if ((commit_freed > 128 && now > start + 4) &&
3029                             atomic_read(&imp->imp_waiting)) {
3030                                 if (debug_level == D_INFO)
3031                                         debug_level = D_RPCTRACE;
3032                                 break;
3033                         }
3034                 }
3035
3036                 if (req->rq_import_generation < imp_generation ||
3037                     !req->rq_replay) {
3038                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
3039                                   req->rq_import_generation <
3040                                   imp_generation ? "stale" : "closed");
3041
3042                         if (imp->imp_replay_cursor == &req->rq_replay_list)
3043                                 imp->imp_replay_cursor =
3044                                         req->rq_replay_list.next;
3045
3046                         commit_freed++;
3047                         ptlrpc_free_request(req);
3048                 }
3049         }
3050 out:
3051         /* if full lists processed without interruption, avoid next scan */
3052         if (debug_level == D_INFO) {
3053                 imp->imp_last_transno_checked = peer_committed_transno;
3054                 imp->imp_last_generation_checked = imp_generation;
3055         }
3056
3057         CDEBUG_LIMIT(debug_level,
3058                      "%s: %s: skip=%u replay=%u/%u committed=%u/%u\n",
3059                      imp->imp_obd->obd_name,
3060                      debug_level == D_INFO ? "normal" : "overloaded",
3061                      skip_committed_list, replay_freed, replay_scanned,
3062                      commit_freed, commit_scanned);
3063         EXIT;
3064 }
3065
3066 void ptlrpc_cleanup_client(struct obd_import *imp)
3067 {
3068         ENTRY;
3069         EXIT;
3070 }
3071
3072 /**
3073  * Schedule a previously sent request for resend.
3074  * For bulk requests we assign a new xid (to avoid problems with
3075  * lost replies and therefore several transfers landing in the same buffer
3076  * from different sending attempts).
3077  */
3078 void ptlrpc_resend_req(struct ptlrpc_request *req)
3079 {
3080         DEBUG_REQ(D_HA, req, "going to resend");
3081         spin_lock(&req->rq_lock);
3082
3083         /*
3084          * The request got a reply but is still linked on the import list.
3085          * Let ptlrpc_check_set() process it.
3086          */
3087         if (ptlrpc_client_replied(req)) {
3088                 spin_unlock(&req->rq_lock);
3089                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
3090                 return;
3091         }
3092
3093         req->rq_status = -EAGAIN;
3094
3095         req->rq_resend = 1;
3096         req->rq_net_err = 0;
3097         req->rq_timedout = 0;
3098
3099         ptlrpc_client_wake_req(req);
3100         spin_unlock(&req->rq_lock);
3101 }
3102
3103 /* XXX: this function and rq_status are currently unused */
3104 void ptlrpc_restart_req(struct ptlrpc_request *req)
3105 {
3106         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
3107         req->rq_status = -ERESTARTSYS;
3108
3109         spin_lock(&req->rq_lock);
3110         req->rq_restart = 1;
3111         req->rq_timedout = 0;
3112         ptlrpc_client_wake_req(req);
3113         spin_unlock(&req->rq_lock);
3114 }
3115
3116 /**
3117  * Grab additional reference on a request \a req
3118  */
3119 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
3120 {
3121         ENTRY;
3122         atomic_inc(&req->rq_refcount);
3123         RETURN(req);
3124 }
3125 EXPORT_SYMBOL(ptlrpc_request_addref);
3126
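/*
 * A minimal sketch of the reference-ownership convention used in this file
 * (illustration only, not a new API): any hand-off of a request to another
 * context takes its own reference with ptlrpc_request_addref() and that
 * context later drops it with ptlrpc_req_put(), for example:
 *
 *	ptlrpc_request_addref(req);	(ref for the set/ptlrpcd/replay list)
 *	ptlrpc_set_add_req(set, req);	(the new owner drops it when done)
 *	...
 *	ptlrpc_req_put(req);		(drop the caller's own reference)
 */
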
3127 /**
3128  * Add a request to import replay_list.
3129  * Must be called under imp_lock
3130  */
3131 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3132                                       struct obd_import *imp)
3133 {
3134         struct ptlrpc_request *iter;
3135
3136         assert_spin_locked(&imp->imp_lock);
3137
3138         if (req->rq_transno == 0) {
3139                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3140                 LBUG();
3141         }
3142
3143         /*
3144          * clear this for new requests that were resent as well
3145          * as resent replayed requests.
3146          */
3147         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3148
3149         /* don't re-add requests that have been replayed */
3150         if (!list_empty(&req->rq_replay_list))
3151                 return;
3152
3153         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3154
3155         spin_lock(&req->rq_lock);
3156         req->rq_resend = 0;
3157         spin_unlock(&req->rq_lock);
3158
3159         LASSERT(imp->imp_replayable);
3160         /* Balanced in ptlrpc_free_committed, usually. */
3161         ptlrpc_request_addref(req);
3162         list_for_each_entry_reverse(iter, &imp->imp_replay_list,
3163                                     rq_replay_list) {
3164                 /*
3165                  * We may have duplicate transnos if we create and then
3166                  * open a file, or for closes retained to match creating
3167                  * opens, so use req->rq_xid as a secondary key.
3168                  * (See bugs 684, 685, and 428.)
3169                  * XXX no longer needed, but all opens need transnos!
3170                  */
3171                 if (iter->rq_transno > req->rq_transno)
3172                         continue;
3173
3174                 if (iter->rq_transno == req->rq_transno) {
3175                         LASSERT(iter->rq_xid != req->rq_xid);
3176                         if (iter->rq_xid > req->rq_xid)
3177                                 continue;
3178                 }
3179
3180                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3181                 return;
3182         }
3183
3184         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3185 }
3186
3187 /**
3188  * Send request and wait until it completes.
3189  * Returns request processing status.
3190  */
3191 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3192 {
3193         struct ptlrpc_request_set *set;
3194         int rc;
3195
3196         ENTRY;
3197         LASSERT(req->rq_set == NULL);
3198         LASSERT(!req->rq_receiving_reply);
3199
3200         set = ptlrpc_prep_set();
3201         if (!set) {
3202                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3203                 RETURN(-ENOMEM);
3204         }
3205
3206         /* for distributed debugging */
3207         lustre_msg_set_status(req->rq_reqmsg, current->pid);
3208
3209         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3210         ptlrpc_request_addref(req);
3211         ptlrpc_set_add_req(set, req);
3212         rc = ptlrpc_set_wait(NULL, set);
3213         ptlrpc_set_destroy(set);
3214
3215         RETURN(rc);
3216 }
3217 EXPORT_SYMBOL(ptlrpc_queue_wait);
3218
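/*
 * A minimal caller-side sketch of the synchronous pattern above (for
 * illustration only; the request format, opcode and allocation helpers
 * come from elsewhere in the ptlrpc/req-layout code, not from this file):
 *
 *	static int example_sync_ping(struct obd_import *imp)
 *	{
 *		struct ptlrpc_request *req;
 *		int rc;
 *
 *		req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *						LUSTRE_OBD_VERSION, OBD_PING);
 *		if (req == NULL)
 *			return -ENOMEM;
 *
 *		ptlrpc_request_set_replen(req);
 *		rc = ptlrpc_queue_wait(req);
 *		ptlrpc_req_put(req);
 *		return rc;
 *	}
 */
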
3219 /**
3220  * Callback used for replayed requests reply processing.
3221  * In case of a successful reply, calls the registered request replay callback.
3222  * In case of error, restarts the replay process.
3223  */
3224 static int ptlrpc_replay_interpret(const struct lu_env *env,
3225                                    struct ptlrpc_request *req,
3226                                    void *args, int rc)
3227 {
3228         struct ptlrpc_replay_async_args *aa = args;
3229         struct obd_import *imp = req->rq_import;
3230
3231         ENTRY;
3232         atomic_dec(&imp->imp_replay_inflight);
3233
3234         /*
3235          * Note: if it is bulk replay (MDS-MDS replay), then even if
3236          * server got the request, but bulk transfer timeout, let's
3237          * replay the bulk req again
3238          */
3239         if (!ptlrpc_client_replied(req) ||
3240             (req->rq_bulk &&
3241              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3242                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3243                 GOTO(out, rc = -ETIMEDOUT);
3244         }
3245
3246         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3247             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3248             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3249                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3250
3251         /** VBR: check version failure */
3252         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3253                 /** replay failed due to version mismatch */
3254                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3255                 spin_lock(&imp->imp_lock);
3256                 imp->imp_vbr_failed = 1;
3257                 spin_unlock(&imp->imp_lock);
3258                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3259         } else {
3260                 /** The transno had better not change over replay. */
3261                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3262                          lustre_msg_get_transno(req->rq_repmsg) ||
3263                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3264                          "%#llx/%#llx\n",
3265                          lustre_msg_get_transno(req->rq_reqmsg),
3266                          lustre_msg_get_transno(req->rq_repmsg));
3267         }
3268
3269         spin_lock(&imp->imp_lock);
3270         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3271         spin_unlock(&imp->imp_lock);
3272         LASSERT(imp->imp_last_replay_transno);
3273
3274         /* transaction number shouldn't be bigger than the latest replayed */
3275         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3276                 DEBUG_REQ(D_ERROR, req,
3277                           "Reported transno=%llu is bigger than replayed=%llu",
3278                           req->rq_transno,
3279                           lustre_msg_get_transno(req->rq_reqmsg));
3280                 GOTO(out, rc = -EINVAL);
3281         }
3282
3283         DEBUG_REQ(D_HA, req, "got reply");
3284
3285         /* let the callback do fixups, possibly in the request itself */
3286         if (req->rq_replay_cb)
3287                 req->rq_replay_cb(req);
3288
3289         if (ptlrpc_client_replied(req) &&
3290             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3291                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3292                           lustre_msg_get_status(req->rq_repmsg),
3293                           aa->praa_old_status);
3294
3295                 /*
3296                  * Note: If the replay fails for MDT-MDT recovery, let's
3297                  * abort all of the following requests in the replay
3298                  * and sending list, because MDT-MDT update requests
3299                  * are dependent on each other, see LU-7039
3300                  */
3301                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3302                         struct ptlrpc_request *free_req;
3303                         struct ptlrpc_request *tmp;
3304
3305                         spin_lock(&imp->imp_lock);
3306                         list_for_each_entry_safe(free_req, tmp,
3307                                                  &imp->imp_replay_list,
3308                                                  rq_replay_list) {
3309                                 ptlrpc_free_request(free_req);
3310                         }
3311
3312                         list_for_each_entry_safe(free_req, tmp,
3313                                                  &imp->imp_committed_list,
3314                                                  rq_replay_list) {
3315                                 ptlrpc_free_request(free_req);
3316                         }
3317
3318                         list_for_each_entry_safe(free_req, tmp,
3319                                                  &imp->imp_delayed_list,
3320                                                  rq_list) {
3321                                 spin_lock(&free_req->rq_lock);
3322                                 free_req->rq_err = 1;
3323                                 free_req->rq_status = -EIO;
3324                                 ptlrpc_client_wake_req(free_req);
3325                                 spin_unlock(&free_req->rq_lock);
3326                         }
3327
3328                         list_for_each_entry_safe(free_req, tmp,
3329                                                  &imp->imp_sending_list,
3330                                                  rq_list) {
3331                                 spin_lock(&free_req->rq_lock);
3332                                 free_req->rq_err = 1;
3333                                 free_req->rq_status = -EIO;
3334                                 ptlrpc_client_wake_req(free_req);
3335                                 spin_unlock(&free_req->rq_lock);
3336                         }
3337                         spin_unlock(&imp->imp_lock);
3338                 }
3339         } else {
3340                 /* Put it back for re-replay. */
3341                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3342         }
3343
3344         /*
3345          * Errors during replay can set transno to 0, but
3346          * imp_last_replay_transno shouldn't be set to 0 anyway.
3347          */
3348         if (req->rq_transno == 0)
3349                 CERROR("Transno is 0 during replay!\n");
3350
3351         /* continue with recovery */
3352         rc = ptlrpc_import_recovery_state_machine(imp);
3353  out:
3354         req->rq_send_state = aa->praa_old_state;
3355
3356         if (rc != 0)
3357                 /* this replay failed, so restart recovery */
3358                 ptlrpc_connect_import(imp);
3359
3360         RETURN(rc);
3361 }
3362
3363 /**
3364  * Prepares and queues request for replay.
3365  * Adds it to ptlrpcd queue for actual sending.
3366  * Returns 0 on success.
3367  */
3368 int ptlrpc_replay_req(struct ptlrpc_request *req)
3369 {
3370         struct ptlrpc_replay_async_args *aa;
3371
3372         ENTRY;
3373
3374         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3375
3376         CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_REPLAY_PAUSE, cfs_fail_val);
3377
3378         aa = ptlrpc_req_async_args(aa, req);
3379         memset(aa, 0, sizeof(*aa));
3380
3381         /* Prepare request to be resent with ptlrpcd */
3382         aa->praa_old_state = req->rq_send_state;
3383         req->rq_send_state = LUSTRE_IMP_REPLAY;
3384         req->rq_phase = RQ_PHASE_NEW;
3385         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3386         if (req->rq_repmsg)
3387                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3388         req->rq_status = 0;
3389         req->rq_interpret_reply = ptlrpc_replay_interpret;
3390         /* Readjust the timeout for current conditions */
3391         ptlrpc_at_set_req_timeout(req);
3392
3393         /* Tell server net_latency to calculate how long to wait for reply. */
3394         lustre_msg_set_service_timeout(req->rq_reqmsg,
3395                                        ptlrpc_at_get_net_latency(req));
3396         DEBUG_REQ(D_HA, req, "REPLAY");
3397
3398         atomic_inc(&req->rq_import->imp_replay_inflight);
3399         spin_lock(&req->rq_lock);
3400         req->rq_early_free_repbuf = 0;
3401         spin_unlock(&req->rq_lock);
3402         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3403
3404         ptlrpcd_add_req(req);
3405         RETURN(0);
3406 }
3407
3408 /**
3409  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
3410  */
3411 void ptlrpc_abort_inflight(struct obd_import *imp)
3412 {
3413         struct ptlrpc_request *req;
3414         ENTRY;
3415
3416         /*
3417          * Make sure that no new requests get processed for this import.
3418          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3419          * this flag and then putting requests on sending_list or delayed_list.
3420          */
3421         assert_spin_locked(&imp->imp_lock);
3422
3423         /*
3424          * XXX locking?  Maybe we should remove each request with the list
3425          * locked?  Also, how do we know if the requests on the list are
3426          * being freed at this time?
3427          */
3428         list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
3429                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3430
3431                 spin_lock(&req->rq_lock);
3432                 if (req->rq_import_generation < imp->imp_generation) {
3433                         req->rq_err = 1;
3434                         req->rq_status = -EIO;
3435                         ptlrpc_client_wake_req(req);
3436                 }
3437                 spin_unlock(&req->rq_lock);
3438         }
3439
3440         list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
3441                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3442
3443                 spin_lock(&req->rq_lock);
3444                 if (req->rq_import_generation < imp->imp_generation) {
3445                         req->rq_err = 1;
3446                         req->rq_status = -EIO;
3447                         ptlrpc_client_wake_req(req);
3448                 }
3449                 spin_unlock(&req->rq_lock);
3450         }
3451
3452         /*
3453          * Last chance to free reqs left on the replay list, but we
3454          * will still leak reqs that haven't committed.
3455          */
3456         if (imp->imp_replayable)
3457                 ptlrpc_free_committed(imp);
3458
3459         EXIT;
3460 }
3461
3462 /**
3463  * Abort all uncompleted requests in request set \a set
3464  */
3465 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3466 {
3467         struct ptlrpc_request *req;
3468
3469         LASSERT(set != NULL);
3470
3471         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
3472                 spin_lock(&req->rq_lock);
3473                 if (req->rq_phase != RQ_PHASE_RPC) {
3474                         spin_unlock(&req->rq_lock);
3475                         continue;
3476                 }
3477
3478                 req->rq_err = 1;
3479                 req->rq_status = -EINTR;
3480                 ptlrpc_client_wake_req(req);
3481                 spin_unlock(&req->rq_lock);
3482         }
3483 }
3484
3485 /**
3486  * Initialize the XID for the node.  This is common among all requests on
3487  * this node, and only requires the property that it is monotonically
3488  * increasing.  It does not need to be sequential.  Since this is also used
3489  * as the RDMA match bits, it is important that a single client NOT have
3490  * the same match bits for two different in-flight requests, hence we do
3491  * NOT want to have an XID per target or similar.
3492  *
3493  * To avoid an unlikely collision between match bits after a client reboot
3494  * (which would deliver old data into the wrong RDMA buffer) initialize
3495  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3496  * If the time is clearly incorrect, we instead use a 62-bit random number.
3497  * In the worst case the random number will overflow 1M RPCs per second in
3498  * 9133 years, or permutations thereof.
3499  */
3500 #define YEAR_2004 (1ULL << 30)
3501 void ptlrpc_init_xid(void)
3502 {
3503         time64_t now = ktime_get_real_seconds();
3504         u64 xid;
3505
3506         if (now < YEAR_2004) {
3507                 get_random_bytes(&xid, sizeof(xid));
3508                 xid >>= 2;
3509                 xid |= (1ULL << 61);
3510         } else {
3511                 xid = (u64)now << 20;
3512         }
3513
3514         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3515         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3516                      0);
3517         xid &= PTLRPC_BULK_OPS_MASK;
3518         atomic64_set(&ptlrpc_last_xid, xid);
3519 }
3520
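/*
 * Arithmetic behind the initialization above (illustration only): with a
 * sane clock the starting XID is "seconds << 20", i.e. it advances by
 * 2^20 = 1048576 values per wall-clock second, which is where the
 * "1M RPC/s" figure in the comment before this function comes from.  If
 * the clock is clearly wrong (before 2004), the fallback keeps 62 random
 * bits and forces bit 61 on, so the result is at least 2^61 and well above
 * any time-derived value a correctly-clocked client would use today.
 */
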
3521 /**
3522  * Increase xid and returns resulting new value to the caller.
3523  *
3524  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
3525  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
3526  * itself uses the last bulk xid needed, so the server can determine
3527  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3528  * xid must align to a power-of-two value.
3529  *
3530  * This is assumed to be true due to the initial ptlrpc_last_xid
3531  * value also being initialized to a power-of-two value. LU-1431
3532  */
3533 __u64 ptlrpc_next_xid(void)
3534 {
3535         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3536 }
3537
3538 /**
3539  * If the request has a newly allocated XID (new request or EINPROGRESS resend),
3540  * use this XID as the bulk matchbits, otherwise allocate new matchbits for the
3541  * request to ensure the previous bulk fails, avoiding problems with lost replies
3542  * and therefore several transfers landing in the same buffer from different
3543  * sending attempts.
3544  * This also avoids a previous reply landing in a different sending attempt.
3545  */
3546 void ptlrpc_set_mbits(struct ptlrpc_request *req)
3547 {
3548         int md_count = req->rq_bulk ? req->rq_bulk->bd_md_count : 1;
3549
3550         /*
3551          * Generate new matchbits for all resend requests, including
3552          * resend replay.
3553          */
3554         if (req->rq_resend) {
3555                 __u64 old_mbits = req->rq_mbits;
3556
3557                 /*
3558                  * A first-time resend on -EINPROGRESS will generate a new xid,
3559                  * so we could actually use the rq_xid as rq_mbits in that case;
3560                  * however, it's a bit hard to distinguish such a resend from a
3561                  * 'resend for the -EINPROGRESS resend'. To make it simple,
3562                  * we opt to generate mbits for all resend cases.
3563                  */
3564                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3565                                  BULK_MBITS)) {
3566                         req->rq_mbits = ptlrpc_next_xid();
3567                 } else {
3568                         /*
3569                          * Old version transfers rq_xid to peer as
3570                          * matchbits.
3571                          */
3572                         spin_lock(&req->rq_import->imp_lock);
3573                         list_del_init(&req->rq_unreplied_list);
3574                         ptlrpc_assign_next_xid_nolock(req);
3575                         spin_unlock(&req->rq_import->imp_lock);
3576                         req->rq_mbits = req->rq_xid;
3577                 }
3578                 CDEBUG(D_HA, "resend with new mbits old x%llu new x%llu\n",
3579                        old_mbits, req->rq_mbits);
3580         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3581                 /* Request being sent first time, use xid as matchbits. */
3582                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3583                                  BULK_MBITS) || req->rq_mbits == 0)
3584                 {
3585                         req->rq_mbits = req->rq_xid;
3586                 } else {
3587                         req->rq_mbits -= md_count - 1;
3588                 }
3589         } else {
3590                 /*
3591                  * Replay request, xid and matchbits have already been
3592                  * correctly assigned.
3593                  */
3594                 return;
3595         }
3596
3597         /*
3598          * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
3599          * that the server can infer the number of bulks that were prepared,
3600          * see LU-1431.
3601          */
3602         req->rq_mbits += md_count - 1;
3603
3604         /*
3605          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3606          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3607          *
3608          * It's ok to directly set the rq_xid here, since this xid bump
3609          * won't affect the request position in unreplied list.
3610          */
3611         if (!OCD_HAS_FLAG(&req->rq_import->imp_connect_data, BULK_MBITS))
3612                 req->rq_xid = req->rq_mbits;
3613 }
3614
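/*
 * Worked example for the multi-bulk adjustment above (illustrative only;
 * the concrete PTLRPC_BULK_OPS_COUNT value of 16 is hypothetical, chosen
 * just to make the numbers readable): if ptlrpc_next_xid() returned a
 * power-of-two-aligned base of 0x1000 and the request has bd_md_count == 3,
 * the bulk transfers use matchbits 0x1000, 0x1001 and 0x1002, and rq_mbits
 * ends up as 0x1002.  The low bits of rq_mbits relative to the aligned base
 * therefore encode "number of bulks - 1" within the window reserved by
 * ptlrpc_next_xid(), which is how the server recovers the bulk count.
 */
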
3615 /**
3616  * Get a glimpse at what next xid value might have been.
3617  * Returns possible next xid.
3618  */
3619 __u64 ptlrpc_sample_next_xid(void)
3620 {
3621         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3622 }
3623 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3624
3625 /**
3626  * Functions for operating ptlrpc workers.
3627  *
3628  * A ptlrpc work item is a function that runs inside ptlrpc context.
3629  * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
3630  *
3631  * 1. After a work item is created, it can be used many times, that is:
3632  *         handler = ptlrpcd_alloc_work();
3633  *         ptlrpcd_queue_work();
3634  *
3635  *    queue it again when necessary:
3636  *         ptlrpcd_queue_work();
3637  *         ptlrpcd_destroy_work();
3638  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3639  *    but the work will only be queued once at any time. Also, as its name
3640  *    implies, there may be a delay before it actually runs in a ptlrpcd thread.
3641  */
3642 struct ptlrpc_work_async_args {
3643         int (*cb)(const struct lu_env *, void *);
3644         void *cbdata;
3645 };
3646
3647 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3648 {
3649         /* re-initialize the req */
3650         req->rq_timeout         = obd_timeout;
3651         req->rq_sent            = ktime_get_real_seconds();
3652         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3653         req->rq_phase           = RQ_PHASE_INTERPRET;
3654         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3655         req->rq_xid             = ptlrpc_next_xid();
3656         req->rq_import_generation = req->rq_import->imp_generation;
3657
3658         ptlrpcd_add_req(req);
3659 }
3660
3661 static int work_interpreter(const struct lu_env *env,
3662                             struct ptlrpc_request *req, void *args, int rc)
3663 {
3664         struct ptlrpc_work_async_args *arg = args;
3665
3666         LASSERT(ptlrpcd_check_work(req));
3667         LASSERT(arg->cb != NULL);
3668
3669         rc = arg->cb(env, arg->cbdata);
3670
3671         list_del_init(&req->rq_set_chain);
3672         req->rq_set = NULL;
3673
3674         if (atomic_dec_return(&req->rq_refcount) > 1) {
3675                 atomic_set(&req->rq_refcount, 2);
3676                 ptlrpcd_add_work_req(req);
3677         }
3678         return rc;
3679 }
3680
3681 static int worker_format;
3682
3683 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3684 {
3685         return req->rq_pill.rc_fmt == (void *)&worker_format;
3686 }
3687
3688 /**
3689  * Create a work for ptlrpc.
3690  */
3691 void *ptlrpcd_alloc_work(struct obd_import *imp,
3692                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3693 {
3694         struct ptlrpc_request *req = NULL;
3695         struct ptlrpc_work_async_args *args;
3696
3697         ENTRY;
3698         might_sleep();
3699
3700         if (!cb)
3701                 RETURN(ERR_PTR(-EINVAL));
3702
3703         /* copy some code from deprecated fakereq. */
3704         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3705         if (!req) {
3706                 CERROR("ptlrpc: run out of memory!\n");
3707                 RETURN(ERR_PTR(-ENOMEM));
3708         }
3709
3710         ptlrpc_cli_req_init(req);
3711
3712         req->rq_send_state = LUSTRE_IMP_FULL;
3713         req->rq_type = PTL_RPC_MSG_REQUEST;
3714         req->rq_import = class_import_get(imp);
3715         req->rq_interpret_reply = work_interpreter;
3716         /* don't want reply */
3717         req->rq_no_delay = req->rq_no_resend = 1;
3718         req->rq_pill.rc_fmt = (void *)&worker_format;
3719
3720         args = ptlrpc_req_async_args(args, req);
3721         args->cb     = cb;
3722         args->cbdata = cbdata;
3723
3724         RETURN(req);
3725 }
3726 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3727
3728 void ptlrpcd_destroy_work(void *handler)
3729 {
3730         struct ptlrpc_request *req = handler;
3731
3732         if (req)
3733                 ptlrpc_req_finished(req);
3734 }
3735 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3736
3737 int ptlrpcd_queue_work(void *handler)
3738 {
3739         struct ptlrpc_request *req = handler;
3740
3741         /*
3742          * Check if the req is already being queued.
3743          *
3744          * Here comes a trick: ptlrpc lacks a reliable way of checking whether
3745          * a req is being processed, so I have to use the refcount of the req
3746          * for this purpose. This is okay because the caller should use this
3747          * req as opaque data. - Jinshan
3748          */
3749         LASSERT(atomic_read(&req->rq_refcount) > 0);
3750         if (atomic_inc_return(&req->rq_refcount) == 2)
3751                 ptlrpcd_add_work_req(req);
3752         return 0;
3753 }
3754 EXPORT_SYMBOL(ptlrpcd_queue_work);
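
/*
 * A minimal usage sketch for the work interface above (illustration only;
 * example_cb and its data pointer are hypothetical).  The callback must not
 * sleep because it runs in ptlrpcd context:
 *
 *	static int example_cb(const struct lu_env *env, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	handler = ptlrpcd_alloc_work(imp, example_cb, data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *	ptlrpcd_queue_work(handler);
 *	...
 *	ptlrpcd_destroy_work(handler);
 */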