[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  */
31
32 /** Implementation of client-side PortalRPC interfaces */
33
34 #define DEBUG_SUBSYSTEM S_RPC
35
36 #include <linux/delay.h>
37 #include <linux/random.h>
38
39 #include <lnet/lib-lnet.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(desc->bd_vec[i].bv_page);
69 }
70
71 static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
72                                        void *frag, int len)
73 {
74         unsigned int offset = (unsigned long)frag & ~PAGE_MASK;
75
76         ENTRY;
77         while (len > 0) {
78                 int page_len = min_t(unsigned int, PAGE_SIZE - offset,
79                                      len);
80                 struct page *p;
81
82                 if (!is_vmalloc_addr(frag))
83                         p = virt_to_page((unsigned long)frag);
84                 else
85                         p = vmalloc_to_page(frag);
86                 ptlrpc_prep_bulk_page_nopin(desc, p, offset, page_len);
87                 offset = 0;
88                 len -= page_len;
89                 frag += page_len;
90         }
91
92         RETURN(desc->bd_nob);
93 }
94
95 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
96         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
97         .release_frags  = ptlrpc_release_bulk_page_pin,
98 };
99 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
100
101 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
102         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
103         .release_frags  = ptlrpc_release_bulk_noop,
104         .add_iov_frag   = ptlrpc_prep_bulk_frag_pages,
105 };
106 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
107
108 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
109 static int ptlrpcd_check_work(struct ptlrpc_request *req);
110 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
111
112 /**
113  * Initialize the passed-in client structure \a cl.
114  */
115 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
116                         struct ptlrpc_client *cl)
117 {
118         cl->cli_request_portal = req_portal;
119         cl->cli_reply_portal   = rep_portal;
120         cl->cli_name           = name;
121 }
122 EXPORT_SYMBOL(ptlrpc_init_client);
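/*
 * Illustrative sketch (not part of the upstream file): a client module would
 * call ptlrpc_init_client() once at setup time to record which portals its
 * requests and replies travel over.  The portal numbers and the
 * example_client/example_setup names below are hypothetical placeholders.
 */
#if 0
static struct ptlrpc_client example_client;

static void example_setup(void)
{
	/* request portal 28 and reply portal 29 are made-up values */
	ptlrpc_init_client(28, 29, "example-client", &example_client);
}
#endif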
123
124 /**
125  * Return PortalRPC connection for remote uuid \a uuid
126  */
127 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
128                                                     u32 refnet)
129 {
130         struct ptlrpc_connection *c;
131         struct lnet_nid self;
132         struct lnet_processid peer;
133         int err;
134
135         /*
136          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
137          * before accessing its values.
138          */
139         err = ptlrpc_uuid_to_peer(uuid, &peer, &self, refnet);
140         if (err != 0) {
141                 CNETERR("cannot find peer %s!\n", uuid->uuid);
142                 return NULL;
143         }
144
145         c = ptlrpc_connection_get(&peer, &self, uuid);
146         if (c) {
147                 memcpy(c->c_remote_uuid.uuid,
148                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
149         }
150
151         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
152
153         return c;
154 }
155
156 /**
157  * Allocate and initialize new bulk descriptor on the sender.
158  * Returns pointer to the descriptor or NULL on error.
159  */
160 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
161                                          unsigned int max_brw,
162                                          enum ptlrpc_bulk_op_type type,
163                                          unsigned int portal,
164                                          const struct ptlrpc_bulk_frag_ops *ops)
165 {
166         struct ptlrpc_bulk_desc *desc;
167         int i;
168
169         LASSERT(ops->add_kiov_frag != NULL);
170
171         if (max_brw > PTLRPC_BULK_OPS_COUNT)
172                 RETURN(NULL);
173
174         if (nfrags > LNET_MAX_IOV * max_brw)
175                 RETURN(NULL);
176
177         OBD_ALLOC_PTR(desc);
178         if (!desc)
179                 return NULL;
180
181         OBD_ALLOC_LARGE(desc->bd_vec,
182                         nfrags * sizeof(*desc->bd_vec));
183         if (!desc->bd_vec)
184                 goto out;
185
186         spin_lock_init(&desc->bd_lock);
187         init_waitqueue_head(&desc->bd_waitq);
188         desc->bd_max_iov = nfrags;
189         desc->bd_iov_count = 0;
190         desc->bd_portal = portal;
191         desc->bd_type = type;
192         desc->bd_md_count = 0;
193         desc->bd_nob_last = LNET_MTU;
194         desc->bd_frag_ops = ops;
195         LASSERT(max_brw > 0);
196         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
197         /*
198          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
199          * node. Negotiated ocd_brw_size will always be <= this number.
200          */
201         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
202                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
203
204         return desc;
205 out:
206         OBD_FREE_PTR(desc);
207         return NULL;
208 }
209
210 /**
211  * Prepare bulk descriptor for specified outgoing request \a req that
212  * can fit \a nfrags * pages. \a type is the bulk type. \a portal is where
213  * the bulk is to be sent. Used on the client side.
214  * Returns pointer to newly allocated and initialized bulk descriptor or
215  * NULL on error.
216  */
217 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
218                                               unsigned int nfrags,
219                                               unsigned int max_brw,
220                                               unsigned int type,
221                                               unsigned int portal,
222                                               const struct ptlrpc_bulk_frag_ops
223                                                 *ops)
224 {
225         struct obd_import *imp = req->rq_import;
226         struct ptlrpc_bulk_desc *desc;
227
228         ENTRY;
229         LASSERT(ptlrpc_is_bulk_op_passive(type));
230
231         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
232         if (!desc)
233                 RETURN(NULL);
234
235         desc->bd_import = class_import_get(imp);
236         desc->bd_req = req;
237
238         desc->bd_cbid.cbid_fn  = client_bulk_callback;
239         desc->bd_cbid.cbid_arg = desc;
240
241         /* This makes req own desc, so desc is freed when req is freed */
242         req->rq_bulk = desc;
243
244         return desc;
245 }
246 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
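/*
 * Illustrative sketch: how a client-side caller might attach a bulk
 * descriptor to an already-packed request and feed whole pages into it
 * through the frag ops declared above.  PTLRPC_BULK_PUT_SINK and
 * OST_BULK_PORTAL are assumed from the wider tree (they are not defined in
 * this file), and example_attach_bulk() is a hypothetical helper.
 */
#if 0
static int example_attach_bulk(struct ptlrpc_request *req,
			       struct page **pages, int npages)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, npages, 1, PTLRPC_BULK_PUT_SINK,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return -ENOMEM;

	/* each full page becomes one kiov fragment; pages get pinned */
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);

	/* req now owns desc, so it is released together with the request */
	return 0;
}
#endif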
247
248 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
249                              struct page *page, int pageoffset, int len,
250                              int pin)
251 {
252         struct bio_vec *kiov;
253
254         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
255         LASSERT(page != NULL);
256         LASSERT(pageoffset >= 0);
257         LASSERT(len > 0);
258         LASSERT(pageoffset + len <= PAGE_SIZE);
259
260         kiov = &desc->bd_vec[desc->bd_iov_count];
261
262         if (((desc->bd_iov_count % LNET_MAX_IOV) == 0) ||
263              ((desc->bd_nob_last + len) > LNET_MTU)) {
264                 desc->bd_mds_off[desc->bd_md_count] = desc->bd_iov_count;
265                 desc->bd_md_count++;
266                 desc->bd_nob_last = 0;
267                 LASSERT(desc->bd_md_count <= PTLRPC_BULK_OPS_COUNT);
268         }
269
270         desc->bd_nob_last += len;
271         desc->bd_nob += len;
272
273         if (pin)
274                 get_page(page);
275
276         kiov->bv_page = page;
277         kiov->bv_offset = pageoffset;
278         kiov->bv_len = len;
279
280         desc->bd_iov_count++;
281 }
282 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
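/*
 * Worked example (illustrative only): fragments accumulate into the current
 * bulk MD until either bd_iov_count reaches a multiple of LNET_MAX_IOV or
 * the bytes added since the last boundary would exceed LNET_MTU; at that
 * point bd_mds_off[] records the fragment index where the next MD begins and
 * bd_nob_last restarts from zero.  With 4KB pages and the commonly used
 * values LNET_MAX_IOV = 256 and LNET_MTU = 1MB (values assumed, not defined
 * here), a 4MB transfer therefore ends up split across four MDs.
 */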
283
284 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
285 {
286         ENTRY;
287
288         LASSERT(desc != NULL);
289         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
290         LASSERT(desc->bd_refs == 0);         /* network hands off */
291         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
292         LASSERT(desc->bd_frag_ops != NULL);
293
294         sptlrpc_pool_put_desc_pages(desc);
295
296         if (desc->bd_export)
297                 class_export_put(desc->bd_export);
298         else
299                 class_import_put(desc->bd_import);
300
301         if (desc->bd_frag_ops->release_frags != NULL)
302                 desc->bd_frag_ops->release_frags(desc);
303
304         OBD_FREE_LARGE(desc->bd_vec,
305                        desc->bd_max_iov * sizeof(*desc->bd_vec));
306         OBD_FREE_PTR(desc);
307         EXIT;
308 }
309 EXPORT_SYMBOL(ptlrpc_free_bulk);
310
311 /**
312  * Set server timelimit for this req, i.e. how long are we willing to wait
313  * for reply before timing out this request.
314  */
315 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
316 {
317         struct obd_device *obd;
318
319         LASSERT(req->rq_import);
320         obd = req->rq_import->imp_obd;
321
322         if (obd_at_off(obd)) {
323                 /* non-AT settings */
324                 /**
325                  * \a imp_server_timeout means this is a reverse import and
326                  * we send (currently only) ASTs to the client, so we cannot
327                  * afford to wait too long for the reply; otherwise the other
328                  * client (because of which we are sending this request) would
329                  * time out waiting for us
330                  */
331                 req->rq_timeout = req->rq_import->imp_server_timeout ?
332                                   obd_timeout / 2 : obd_timeout;
333         } else {
334                 struct imp_at *at = &req->rq_import->imp_at;
335                 timeout_t serv_est;
336                 int idx;
337
338                 idx = import_at_get_index(req->rq_import,
339                                           req->rq_request_portal);
340                 serv_est = obd_at_get(obd, &at->iat_service_estimate[idx]);
341                 /*
342                  * Currently a 32 bit value is sent over the
343                  * wire for rq_timeout so please don't change this
344                  * to time64_t. The work for LU-1158 will in time
345                  * replace rq_timeout with a 64 bit nanosecond value
346                  */
347                 req->rq_timeout = at_est2timeout(serv_est);
348         }
349         /*
350          * We could get even fancier here, using history to predict increased
351          * loading...
352          *
353          * Let the server know what this RPC timeout is by putting it in the
354          * reqmsg
355          */
356         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
357 }
358 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
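/*
 * Worked example (illustrative only): with adaptive timeouts enabled and a
 * current service estimate of 20s for the target portal, at_est2timeout()
 * yields roughly 20 * 125% + 5 = 30s (see the resend comment later in this
 * file); that value is stored in rq_timeout and advertised to the server via
 * lustre_msg_set_timeout().  With AT off and imp_server_timeout set (reverse
 * import), obd_timeout / 2 is used instead.
 */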
359
360 /* Adjust max service estimate based on server value */
361 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
362                                   timeout_t serv_est)
363 {
364         int idx;
365         timeout_t oldse;
366         struct imp_at *at;
367         struct obd_device *obd;
368
369         LASSERT(req->rq_import);
370         obd = req->rq_import->imp_obd;
371         at = &req->rq_import->imp_at;
372
373         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
374         /*
375          * max service estimates are tracked on the server side,
376          * so just keep minimal history here
377          */
378         oldse = obd_at_measure(obd, &at->iat_service_estimate[idx], serv_est);
379         if (oldse != 0) {
380                 unsigned int at_est = obd_at_get(obd,
381                                                 &at->iat_service_estimate[idx]);
382                 CDEBUG(D_ADAPTTO,
383                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
384                        req->rq_import->imp_obd->obd_name,
385                        req->rq_request_portal,
386                        oldse, at_est);
387         }
388 }
389
390 /**
391  * Returns expected network latency per remote node (secs).
392  *
393  * \param[in] req       ptlrpc request
394  *
395  * \retval      0 if AT(Adaptive Timeout) is off
396  * \retval      >0 (iat_net_latency) latency per node
397  */
398 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
399 {
400         struct obd_device *obd = req->rq_import->imp_obd;
401
402         return obd_at_off(obd) ?
403                0 : obd_at_get(obd, &req->rq_import->imp_at.iat_net_latency);
404 }
405
406 /* Adjust expected network latency */
407 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
408                                timeout_t service_timeout)
409 {
410         time64_t now = ktime_get_real_seconds();
411         struct imp_at *at;
412         timeout_t oldnl;
413         timeout_t nl;
414         struct obd_device *obd;
415
416         LASSERT(req->rq_import);
417         obd = req->rq_import->imp_obd;
418
419         if (service_timeout > now - req->rq_sent + 3) {
420                 /*
421                  * b=16408. However, this can also happen if the early reply
422                  * is lost and the client RPC is expired and resent: the early
423                  * reply or the reply of the original RPC can still fit in the
424                  * reply buffer of the resent RPC, so the client measures time
425                  * from the resend time while the server sent back the service
426                  * time of the original RPC.
427                  */
428                 CDEBUG_LIMIT((lustre_msg_get_flags(req->rq_reqmsg) &
429                               MSG_RESENT) ?  D_ADAPTTO : D_WARNING,
430                              "Reported service time %u > total measured time %lld\n",
431                              service_timeout, now - req->rq_sent);
432                 return;
433         }
434
435         /* Network latency is total time less server processing time,
436          * plus 1s for rounding
437          */
438         nl = max_t(timeout_t, now - req->rq_sent - service_timeout, 0) + 1;
439         at = &req->rq_import->imp_at;
440
441         oldnl = obd_at_measure(obd, &at->iat_net_latency, nl);
442         if (oldnl != 0) {
443                 timeout_t timeout = obd_at_get(obd, &at->iat_net_latency);
444
445                 CDEBUG(D_ADAPTTO,
446                        "The network latency for %s (nid %s) has changed from %d to %d\n",
447                        req->rq_import->imp_obd->obd_name,
448                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
449                        oldnl, timeout);
450         }
451 }
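/*
 * Worked example (illustrative only): if the reply arrives 12s after
 * rq_sent and the server reports a 10s service time, the sampled network
 * latency is max(12 - 10, 0) + 1 = 3s, which obd_at_measure() folds into
 * iat_net_latency.  A reported service time larger than the measured round
 * trip plus 3s of slack is discarded above as a likely resend artifact.
 */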
452
453 static int unpack_reply(struct ptlrpc_request *req)
454 {
455         int rc;
456
457         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
458                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
459                 if (rc) {
460                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: rc = %d",
461                                   rc);
462                         return -EPROTO;
463                 }
464         }
465
466         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
467         if (rc) {
468                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: rc = %d",
469                           rc);
470                 return -EPROTO;
471         }
472         return 0;
473 }
474
475 /**
476  * Handle an early reply message, called with the rq_lock held.
477  * If anything goes wrong just ignore it - same as if it never happened
478  */
479 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
480 __must_hold(&req->rq_lock)
481 {
482         struct ptlrpc_request *early_req;
483         timeout_t service_timeout;
484         time64_t olddl;
485         int rc;
486
487         ENTRY;
488         req->rq_early = 0;
489         spin_unlock(&req->rq_lock);
490
491         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
492         if (rc) {
493                 spin_lock(&req->rq_lock);
494                 RETURN(rc);
495         }
496
497         rc = unpack_reply(early_req);
498         if (rc != 0) {
499                 sptlrpc_cli_finish_early_reply(early_req);
500                 spin_lock(&req->rq_lock);
501                 RETURN(rc);
502         }
503
504         /*
505          * Use the new timeout value just to adjust the local value for this
506          * request; don't include it in at_history. It is not yet clear why
507          * the service time increased and whether it should be counted or
508          * skipped, e.g. it can be a recovery case or an error on the server;
509          * the real reply will add all the new data if it is worth adding.
510          */
511         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
512         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
513
514         /* Network latency can be adjusted, it is pure network delays */
515         service_timeout = lustre_msg_get_service_timeout(early_req->rq_repmsg);
516         ptlrpc_at_adj_net_latency(req, service_timeout);
517
518         sptlrpc_cli_finish_early_reply(early_req);
519
520         spin_lock(&req->rq_lock);
521         olddl = req->rq_deadline;
522         /*
523          * server assumes it now has rq_timeout from when the request
524          * arrived, so the client should give it at least that long.
525          * since we don't know the arrival time we'll use the original
526          * sent time
527          */
528         req->rq_deadline = req->rq_sent + req->rq_timeout +
529                            ptlrpc_at_get_net_latency(req);
530
531         /* The below message is checked in replay-single.sh test_65{a,b} */
532         /* The below message is checked in sanity-{gss,krb5} test_8 */
533         DEBUG_REQ(D_ADAPTTO, req,
534                   "Early reply #%d, new deadline in %llds (%llds)",
535                   req->rq_early_count,
536                   req->rq_deadline - ktime_get_real_seconds(),
537                   req->rq_deadline - olddl);
538
539         RETURN(rc);
540 }
541
542 static struct kmem_cache *request_cache;
543
544 int ptlrpc_request_cache_init(void)
545 {
546         request_cache = kmem_cache_create("ptlrpc_cache",
547                                           sizeof(struct ptlrpc_request),
548                                           0, SLAB_HWCACHE_ALIGN, NULL);
549         return request_cache ? 0 : -ENOMEM;
550 }
551
552 void ptlrpc_request_cache_fini(void)
553 {
554         kmem_cache_destroy(request_cache);
555 }
556
557 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
558 {
559         struct ptlrpc_request *req;
560
561         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
562         return req;
563 }
564
565 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
566 {
567         OBD_SLAB_FREE_PTR(req, request_cache);
568 }
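/*
 * Illustrative sketch: the slab cache above is created once at module init
 * and destroyed at module exit, while individual requests cycle through the
 * alloc/free helpers.  The example_module_init()/example_module_exit()
 * wrappers are hypothetical.
 */
#if 0
static int example_module_init(void)
{
	struct ptlrpc_request *req;
	int rc;

	rc = ptlrpc_request_cache_init();
	if (rc)
		return rc;

	/* allocate and immediately release one request as a smoke test */
	req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (req)
		ptlrpc_request_cache_free(req);

	return 0;
}

static void example_module_exit(void)
{
	ptlrpc_request_cache_fini();
}
#endif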
569
570 /**
571  * Wind down request pool \a pool.
572  * Frees all requests from the pool too
573  */
574 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
575 {
576         struct ptlrpc_request *req;
577
578         LASSERT(pool != NULL);
579
580         spin_lock(&pool->prp_lock);
581         while ((req = list_first_entry_or_null(&pool->prp_req_list,
582                                                struct ptlrpc_request,
583                                                rq_list))) {
584                 list_del(&req->rq_list);
585                 LASSERT(req->rq_reqbuf);
586                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
587                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
588                 ptlrpc_request_cache_free(req);
589         }
590         spin_unlock(&pool->prp_lock);
591         OBD_FREE(pool, sizeof(*pool));
592 }
593 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
594
595 /**
596  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
597  */
598 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
599 {
600         int i;
601         int size = 1;
602
603         while (size < pool->prp_rq_size)
604                 size <<= 1;
605
606         LASSERTF(list_empty(&pool->prp_req_list) ||
607                  size == pool->prp_rq_size,
608                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
609                  pool->prp_rq_size, size);
610
611         pool->prp_rq_size = size;
612         for (i = 0; i < num_rq; i++) {
613                 struct ptlrpc_request *req;
614                 struct lustre_msg *msg;
615
616                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
617                 if (!req)
618                         return i;
619                 OBD_ALLOC_LARGE(msg, size);
620                 if (!msg) {
621                         ptlrpc_request_cache_free(req);
622                         return i;
623                 }
624                 req->rq_reqbuf = msg;
625                 req->rq_reqbuf_len = size;
626                 req->rq_pool = pool;
627                 spin_lock(&pool->prp_lock);
628                 list_add_tail(&req->rq_list, &pool->prp_req_list);
629                 spin_unlock(&pool->prp_lock);
630         }
631         return num_rq;
632 }
633 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
634
635 /**
636  * Create and initialize new request pool with given attributes:
637  * \a num_rq - initial number of requests to create for the pool
638  * \a msgsize - maximum message size possible for requests in this pool
639  * \a populate_pool - function to be called when more requests need to be added
640  *                    to the pool
641  * Returns pointer to newly created pool or NULL on error.
642  */
643 struct ptlrpc_request_pool *
644 ptlrpc_init_rq_pool(int num_rq, int msgsize,
645                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
646 {
647         struct ptlrpc_request_pool *pool;
648
649         OBD_ALLOC_PTR(pool);
650         if (!pool)
651                 return NULL;
652
653         /*
654          * Request next power of two for the allocation, because internally
655          * the kernel would do exactly this
656          */
657         spin_lock_init(&pool->prp_lock);
658         INIT_LIST_HEAD(&pool->prp_req_list);
659         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
660         pool->prp_populate = populate_pool;
661
662         populate_pool(pool, num_rq);
663
664         return pool;
665 }
666 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
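/*
 * Illustrative sketch: creating a small emergency pool with
 * ptlrpc_add_rqs_to_pool() as the populate callback and winding it down
 * again.  The 32-request / 4096-byte sizing and the example_pool_lifecycle()
 * name are arbitrary.
 */
#if 0
static int example_pool_lifecycle(void)
{
	struct ptlrpc_request_pool *pool;

	pool = ptlrpc_init_rq_pool(32, 4096, ptlrpc_add_rqs_to_pool);
	if (!pool)
		return -ENOMEM;

	/* requests would be drawn via ptlrpc_request_alloc_pool() here */

	ptlrpc_free_rq_pool(pool);
	return 0;
}
#endif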
667
668 /**
669  * Fetches one request from pool \a pool
670  */
671 static struct ptlrpc_request *
672 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
673 {
674         struct ptlrpc_request *request;
675         struct lustre_msg *reqbuf;
676
677         if (!pool)
678                 return NULL;
679
680         spin_lock(&pool->prp_lock);
681
682         /*
683          * See if we have anything in the pool and bail out if there is
684          * nothing. In the writeout path, where this matters, this is safe
685          * to do because nothing is lost in this case: when some in-flight
686          * requests complete, this code will be called again.
687          */
688         if (unlikely(list_empty(&pool->prp_req_list))) {
689                 spin_unlock(&pool->prp_lock);
690                 return NULL;
691         }
692
693         request = list_first_entry(&pool->prp_req_list, struct ptlrpc_request,
694                                    rq_list);
695         list_del_init(&request->rq_list);
696         spin_unlock(&pool->prp_lock);
697
698         LASSERT(request->rq_reqbuf);
699         LASSERT(request->rq_pool);
700
701         reqbuf = request->rq_reqbuf;
702         memset(request, 0, sizeof(*request));
703         request->rq_reqbuf = reqbuf;
704         request->rq_reqbuf_len = pool->prp_rq_size;
705         request->rq_pool = pool;
706
707         return request;
708 }
709
710 /**
711  * Returns freed \a request to pool.
712  */
713 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
714 {
715         struct ptlrpc_request_pool *pool = request->rq_pool;
716
717         spin_lock(&pool->prp_lock);
718         LASSERT(list_empty(&request->rq_list));
719         LASSERT(!request->rq_receiving_reply);
720         list_add_tail(&request->rq_list, &pool->prp_req_list);
721         spin_unlock(&pool->prp_lock);
722 }
723
724 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
725 {
726         struct obd_import *imp = req->rq_import;
727         struct ptlrpc_request *iter;
728
729         assert_spin_locked(&imp->imp_lock);
730         LASSERT(list_empty(&req->rq_unreplied_list));
731
732         /* unreplied list is sorted by xid in ascending order */
733         list_for_each_entry_reverse(iter, &imp->imp_unreplied_list,
734                                     rq_unreplied_list) {
735                 LASSERT(req->rq_xid != iter->rq_xid);
736                 if (req->rq_xid < iter->rq_xid)
737                         continue;
738                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
739                 return;
740         }
741         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
742 }
743
744 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
745 {
746         req->rq_xid = ptlrpc_next_xid();
747         ptlrpc_add_unreplied(req);
748 }
749
750 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
751 {
752         spin_lock(&req->rq_import->imp_lock);
753         ptlrpc_assign_next_xid_nolock(req);
754         spin_unlock(&req->rq_import->imp_lock);
755 }
756
757 static atomic64_t ptlrpc_last_xid;
758
759 static void ptlrpc_reassign_next_xid(struct ptlrpc_request *req)
760 {
761         spin_lock(&req->rq_import->imp_lock);
762         list_del_init(&req->rq_unreplied_list);
763         ptlrpc_assign_next_xid_nolock(req);
764         spin_unlock(&req->rq_import->imp_lock);
765         DEBUG_REQ(D_RPCTRACE, req, "reassign xid");
766 }
767
768 void ptlrpc_get_mod_rpc_slot(struct ptlrpc_request *req)
769 {
770         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
771         __u32 opc;
772         __u16 tag;
773
774         opc = lustre_msg_get_opc(req->rq_reqmsg);
775         tag = obd_get_mod_rpc_slot(cli, opc);
776         lustre_msg_set_tag(req->rq_reqmsg, tag);
777         ptlrpc_reassign_next_xid(req);
778 }
779 EXPORT_SYMBOL(ptlrpc_get_mod_rpc_slot);
780
781 void ptlrpc_put_mod_rpc_slot(struct ptlrpc_request *req)
782 {
783         __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
784
785         if (tag != 0) {
786                 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
787                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
788
789                 obd_put_mod_rpc_slot(cli, opc, tag);
790         }
791 }
792 EXPORT_SYMBOL(ptlrpc_put_mod_rpc_slot);
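/*
 * Illustrative sketch: a modifying RPC brackets its send with the slot
 * helpers above so the number of in-flight modifying requests per client
 * stays bounded; the slot tag is written into the request message and the
 * xid is reassigned so it still matches send order.  example_send_modifying()
 * and its example_send_and_wait() step are hypothetical.
 */
#if 0
static int example_send_modifying(struct ptlrpc_request *req)
{
	int rc;

	ptlrpc_get_mod_rpc_slot(req);
	rc = example_send_and_wait(req);	/* hypothetical send step */
	ptlrpc_put_mod_rpc_slot(req);

	return rc;
}
#endif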
793
794 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
795                              __u32 version, int opcode, char **bufs,
796                              struct ptlrpc_cli_ctx *ctx)
797 {
798         int count;
799         struct obd_import *imp;
800         __u32 *lengths;
801         int rc;
802
803         ENTRY;
804
805         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
806         imp = request->rq_import;
807         lengths = request->rq_pill.rc_area[RCL_CLIENT];
808
809         if (ctx) {
810                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
811         } else {
812                 rc = sptlrpc_req_get_ctx(request);
813                 if (rc)
814                         GOTO(out_free, rc);
815         }
816         sptlrpc_req_set_flavor(request, opcode);
817
818         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
819                                  lengths, bufs);
820         if (rc)
821                 GOTO(out_ctx, rc);
822
823         lustre_msg_add_version(request->rq_reqmsg, version);
824         request->rq_send_state = LUSTRE_IMP_FULL;
825         request->rq_type = PTL_RPC_MSG_REQUEST;
826
827         request->rq_req_cbid.cbid_fn  = request_out_callback;
828         request->rq_req_cbid.cbid_arg = request;
829
830         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
831         request->rq_reply_cbid.cbid_arg = request;
832
833         request->rq_reply_deadline = 0;
834         request->rq_bulk_deadline = 0;
835         request->rq_req_deadline = 0;
836         request->rq_phase = RQ_PHASE_NEW;
837         request->rq_next_phase = RQ_PHASE_UNDEFINED;
838
839         request->rq_request_portal = imp->imp_client->cli_request_portal;
840         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
841
842         ptlrpc_at_set_req_timeout(request);
843
844         lustre_msg_set_opc(request->rq_reqmsg, opcode);
845
846         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
847         if (cfs_fail_val == opcode) {
848                 time64_t *fail_t = NULL, *fail2_t = NULL;
849
850                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
851                         fail_t = &request->rq_bulk_deadline;
852                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
853                         fail_t = &request->rq_reply_deadline;
854                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
855                         fail_t = &request->rq_req_deadline;
856                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
857                         fail_t = &request->rq_reply_deadline;
858                         fail2_t = &request->rq_bulk_deadline;
859                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
860                         time64_t now = ktime_get_real_seconds();
861                         u64 xid = ((u64)now >> 4) << 24;
862
863                         atomic64_set(&ptlrpc_last_xid, xid);
864                 }
865
866                 if (fail_t) {
867                         *fail_t = ktime_get_real_seconds() +
868                                   PTLRPC_REQ_LONG_UNLINK;
869
870                         if (fail2_t)
871                                 *fail2_t = ktime_get_real_seconds() +
872                                            PTLRPC_REQ_LONG_UNLINK;
873
874                         /*
875                          * The RPC is infected; let the test change the
876                          * fail_loc
877                          */
878                         msleep(4 * MSEC_PER_SEC);
879                 }
880         }
881         ptlrpc_assign_next_xid(request);
882
883         RETURN(0);
884
885 out_ctx:
886         LASSERT(!request->rq_pool);
887         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
888 out_free:
889         atomic_dec(&imp->imp_reqs);
890         class_import_put(imp);
891
892         return rc;
893 }
894 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
895
896 /**
897  * Pack request buffers for network transfer, performing any necessary
898  * encryption steps.
899  */
900 int ptlrpc_request_pack(struct ptlrpc_request *request,
901                         __u32 version, int opcode)
902 {
903         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
904 }
905 EXPORT_SYMBOL(ptlrpc_request_pack);
906
907 /**
908  * Helper function to allocate a new request on import \a imp,
909  * possibly using an existing request from pool \a pool if provided.
910  * Returns allocated request structure with import field filled or
911  * NULL on error.
912  */
913 static inline
914 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
915                                               struct ptlrpc_request_pool *pool)
916 {
917         struct ptlrpc_request *request = NULL;
918
919         request = ptlrpc_request_cache_alloc(GFP_NOFS);
920
921         if (!request && pool)
922                 request = ptlrpc_prep_req_from_pool(pool);
923
924         if (request) {
925                 ptlrpc_cli_req_init(request);
926
927                 LASSERTF((unsigned long)imp > 0x1000, "%px\n", imp);
928                 LASSERT(imp != LP_POISON);
929                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%px\n",
930                          imp->imp_client);
931                 LASSERT(imp->imp_client != LP_POISON);
932
933                 request->rq_import = class_import_get(imp);
934                 atomic_inc(&imp->imp_reqs);
935         } else {
936                 CERROR("request allocation out of memory\n");
937         }
938
939         return request;
940 }
941
942 static int ptlrpc_reconnect_if_idle(struct obd_import *imp)
943 {
944         int rc;
945
946         /*
947          * initiate connection if needed when the import has been
948          * referenced by the new request to avoid races with disconnect.
949          * serialize this check against conditional state=IDLE
950          * in ptlrpc_disconnect_idle_interpret()
951          */
952         spin_lock(&imp->imp_lock);
953         if (imp->imp_state == LUSTRE_IMP_IDLE) {
954                 imp->imp_generation++;
955                 imp->imp_initiated_at = imp->imp_generation;
956                 imp->imp_state = LUSTRE_IMP_NEW;
957
958                 /* connect_import_locked releases imp_lock */
959                 rc = ptlrpc_connect_import_locked(imp);
960                 if (rc)
961                         return rc;
962                 ptlrpc_pinger_add_import(imp);
963         } else {
964                 spin_unlock(&imp->imp_lock);
965         }
966         return 0;
967 }
968
969 /**
970  * Helper function for creating a request.
971  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
972  * buffer structures according to capsule template \a format.
973  * Returns allocated request structure pointer or NULL on error.
974  */
975 static struct ptlrpc_request *
976 ptlrpc_request_alloc_internal(struct obd_import *imp,
977                               struct ptlrpc_request_pool *pool,
978                               const struct req_format *format)
979 {
980         struct ptlrpc_request *request;
981
982         request = __ptlrpc_request_alloc(imp, pool);
983         if (!request)
984                 return NULL;
985
986         /* don't make expensive check for idling connection
987          * if it's already connected */
988         if (unlikely(imp->imp_state != LUSTRE_IMP_FULL)) {
989                 if (ptlrpc_reconnect_if_idle(imp) < 0) {
990                         atomic_dec(&imp->imp_reqs);
991                         ptlrpc_request_free(request);
992                         return NULL;
993                 }
994         }
995
996         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
997         req_capsule_set(&request->rq_pill, format);
998         return request;
999 }
1000
1001 /**
1002  * Allocate new request structure for import \a imp and initialize its
1003  * buffer structure according to capsule template \a format.
1004  */
1005 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
1006                                             const struct req_format *format)
1007 {
1008         return ptlrpc_request_alloc_internal(imp, NULL, format);
1009 }
1010 EXPORT_SYMBOL(ptlrpc_request_alloc);
1011
1012 /**
1013  * Allocate new request structure for import \a imp from pool \a pool and
1014  * initialize its buffer structure according to capsule template \a format.
1015  */
1016 struct ptlrpc_request *
1017 ptlrpc_request_alloc_pool(struct obd_import *imp,
1018                           struct ptlrpc_request_pool *pool,
1019                           const struct req_format *format)
1020 {
1021         return ptlrpc_request_alloc_internal(imp, pool, format);
1022 }
1023 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
1024
1025 /**
1026  * For requests not from pool, free memory of the request structure.
1027  * For requests obtained from a pool earlier, return request back to pool.
1028  */
1029 void ptlrpc_request_free(struct ptlrpc_request *request)
1030 {
1031         if (request->rq_pool)
1032                 __ptlrpc_free_req_to_pool(request);
1033         else
1034                 ptlrpc_request_cache_free(request);
1035 }
1036 EXPORT_SYMBOL(ptlrpc_request_free);
1037
1038 /**
1039  * Allocate a new request for operation \a opcode and immediately pack it
1040  * for network transfer.
1041  * Only used for simple requests like OBD_PING where the only important
1042  * part of the request is the operation itself.
1043  * Returns allocated request or NULL on error.
1044  */
1045 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
1046                                                  const struct req_format *format,
1047                                                  __u32 version, int opcode)
1048 {
1049         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
1050         int rc;
1051
1052         if (req) {
1053                 rc = ptlrpc_request_pack(req, version, opcode);
1054                 if (rc) {
1055                         ptlrpc_request_free(req);
1056                         req = NULL;
1057                 }
1058         }
1059         return req;
1060 }
1061 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
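/*
 * Illustrative sketch: a ping-style caller only cares about the opcode, so
 * ptlrpc_request_alloc_pack() covers the whole setup.  RQF_OBD_PING,
 * LUSTRE_OBD_VERSION, ptlrpc_request_set_replen() and ptlrpc_queue_wait()
 * are assumed from the wider tree; they are not defined in this file.
 */
#if 0
static int example_ping(struct obd_import *imp)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	if (!req)
		return -ENOMEM;

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);	/* synchronous send + wait */
	ptlrpc_req_finished(req);

	return rc;
}
#endif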
1062
1063 /**
1064  * Allocate and initialize new request set structure on the current CPT.
1065  * Returns a pointer to the newly allocated set structure or NULL on error.
1066  */
1067 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1068 {
1069         struct ptlrpc_request_set *set;
1070         int cpt;
1071
1072         ENTRY;
1073         cpt = cfs_cpt_current(cfs_cpt_tab, 0);
1074         OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
1075         if (!set)
1076                 RETURN(NULL);
1077         atomic_set(&set->set_refcount, 1);
1078         INIT_LIST_HEAD(&set->set_requests);
1079         init_waitqueue_head(&set->set_waitq);
1080         atomic_set(&set->set_new_count, 0);
1081         atomic_set(&set->set_remaining, 0);
1082         spin_lock_init(&set->set_new_req_lock);
1083         INIT_LIST_HEAD(&set->set_new_requests);
1084         set->set_max_inflight = UINT_MAX;
1085         set->set_producer     = NULL;
1086         set->set_producer_arg = NULL;
1087         set->set_rc           = 0;
1088
1089         RETURN(set);
1090 }
1091 EXPORT_SYMBOL(ptlrpc_prep_set);
1092
1093 /**
1094  * Allocate and initialize new request set structure with flow control
1095  * extension. This extension allows controlling the number of requests
1096  * in flight for the whole set. A callback function to generate requests
1097  * must be provided and the request set will cap the number of requests
1098  * sent over the wire at @max_inflight.
1099  * Returns a pointer to the newly allocated set structure or NULL on error.
1100  */
1101 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1102                                              void *arg)
1103
1104 {
1105         struct ptlrpc_request_set *set;
1106
1107         set = ptlrpc_prep_set();
1108         if (!set)
1109                 RETURN(NULL);
1110
1111         set->set_max_inflight  = max;
1112         set->set_producer      = func;
1113         set->set_producer_arg  = arg;
1114
1115         RETURN(set);
1116 }
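/*
 * Illustrative sketch: a flow-controlled set keeps at most @max requests in
 * flight and calls the producer whenever a slot frees up.  The producer is
 * passed in here to avoid guessing its exact signature, and ptlrpc_set_wait()
 * (including its env argument) is assumed from the wider tree; neither is
 * defined in this file.
 */
#if 0
static int example_run_fcset(set_producer_func producer, void *arg)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_fcset(8, producer, arg);
	if (!set)
		return -ENOMEM;

	rc = ptlrpc_set_wait(NULL, set);	/* drives the producer */
	ptlrpc_set_destroy(set);

	return rc;
}
#endif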
1117
1118 /**
1119  * Wind down and free request set structure previously allocated with
1120  * ptlrpc_prep_set.
1121  * Ensures that all requests on the set have completed and removes
1122  * all requests from the request list in a set.
1123  * If any unsent requests happen to be on the list, pretend that they got
1124  * an error in flight and call their completion handlers.
1125  */
1126 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1127 {
1128         struct ptlrpc_request *req;
1129         int expected_phase;
1130         int n = 0;
1131
1132         ENTRY;
1133
1134         /* Requests on the set should either all be completed, or all be new */
1135         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1136                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1137         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
1138                 LASSERT(req->rq_phase == expected_phase);
1139                 n++;
1140         }
1141
1142         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1143                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1144                  atomic_read(&set->set_remaining), n);
1145
1146         while ((req = list_first_entry_or_null(&set->set_requests,
1147                                                struct ptlrpc_request,
1148                                                rq_set_chain))) {
1149                 list_del_init(&req->rq_set_chain);
1150
1151                 LASSERT(req->rq_phase == expected_phase);
1152
1153                 if (req->rq_phase == RQ_PHASE_NEW) {
1154                         ptlrpc_req_interpret(NULL, req, -EBADR);
1155                         atomic_dec(&set->set_remaining);
1156                 }
1157
1158                 spin_lock(&req->rq_lock);
1159                 req->rq_set = NULL;
1160                 req->rq_invalid_rqset = 0;
1161                 spin_unlock(&req->rq_lock);
1162
1163                 ptlrpc_req_finished(req);
1164         }
1165
1166         LASSERT(atomic_read(&set->set_remaining) == 0);
1167
1168         ptlrpc_reqset_put(set);
1169         EXIT;
1170 }
1171 EXPORT_SYMBOL(ptlrpc_set_destroy);
1172
1173 /**
1174  * Add a new request to the general purpose request set.
1175  * Assumes request reference from the caller.
1176  */
1177 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1178                         struct ptlrpc_request *req)
1179 {
1180         if (set == PTLRPCD_SET) {
1181                 ptlrpcd_add_req(req);
1182                 return;
1183         }
1184
1185         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1186         LASSERT(list_empty(&req->rq_set_chain));
1187
1188         if (req->rq_allow_intr)
1189                 set->set_allow_intr = 1;
1190
1191         /* The set takes over the caller's request reference */
1192         list_add_tail(&req->rq_set_chain, &set->set_requests);
1193         req->rq_set = set;
1194         atomic_inc(&set->set_remaining);
1195         req->rq_queued_time = ktime_get_seconds();
1196
1197         if (req->rq_reqmsg) {
1198                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1199                 lustre_msg_set_uid_gid(req->rq_reqmsg, NULL, NULL);
1200         }
1201
1202         if (set->set_producer)
1203                 /*
1204                  * If the request set has a producer callback, the RPC must be
1205                  * sent straight away
1206                  */
1207                 ptlrpc_send_new_req(req);
1208 }
1209 EXPORT_SYMBOL(ptlrpc_set_add_req);
1210
1211 /**
1212  * Add a request to a request with dedicated server thread
1213  * and wake the thread to make any necessary processing.
1214  * Currently only used for ptlrpcd.
1215  */
1216 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1217                             struct ptlrpc_request *req)
1218 {
1219         struct ptlrpc_request_set *set = pc->pc_set;
1220         int count, i;
1221
1222         LASSERT(req->rq_set == NULL);
1223         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1224
1225         spin_lock(&set->set_new_req_lock);
1226         /*
1227          * The set takes over the caller's request reference.
1228          */
1229         req->rq_set = set;
1230         req->rq_queued_time = ktime_get_seconds();
1231         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1232         count = atomic_inc_return(&set->set_new_count);
1233         spin_unlock(&set->set_new_req_lock);
1234
1235         /* Only need to call wakeup once for the first entry. */
1236         if (count == 1) {
1237                 wake_up(&set->set_waitq);
1238
1239                 /*
1240                  * XXX: It may be unnecessary to wake up all the partners. But
1241                  *      to guarantee the async RPC can be processed ASAP, we
1242                  *      have no better choice. It may be fixed in the future.
1243                  */
1244                 for (i = 0; i < pc->pc_npartners; i++)
1245                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1246         }
1247 }
1248
1249 /**
1250  * Based on the current state of the import, determine if the request
1251  * can be sent, is an error, or should be delayed.
1252  *
1253  * Returns true if this request should be delayed. If false and
1254  * *status is set, then the request cannot be sent and *status is the
1255  * error code.  If false and *status is 0, then the request can be sent.
1256  *
1257  * The imp->imp_lock must be held.
1258  */
1259 static int ptlrpc_import_delay_req(struct obd_import *imp,
1260                                    struct ptlrpc_request *req, int *status)
1261 {
1262         int delay = 0;
1263
1264         ENTRY;
1265         LASSERT(status);
1266         *status = 0;
1267
1268         if (req->rq_ctx_init || req->rq_ctx_fini) {
1269                 /* always allow ctx init/fini rpc go through */
1270         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1271                 DEBUG_REQ(D_ERROR, req, "Uninitialized import");
1272                 *status = -EIO;
1273         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1274                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1275
1276                 /*
1277                  * pings or MDS-equivalent STATFS may safely
1278                  * race with umount
1279                  */
1280                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1281                           D_HA : D_ERROR, req, "IMP_CLOSED");
1282                 *status = -EIO;
1283         } else if (ptlrpc_send_limit_expired(req)) {
1284                 /* probably doesn't need to be a D_ERROR after initial testing */
1285                 DEBUG_REQ(D_HA, req, "send limit expired");
1286                 *status = -ETIMEDOUT;
1287         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1288                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1289                 ;/* allow CONNECT even if import is invalid */
1290                 if (atomic_read(&imp->imp_inval_count) != 0) {
1291                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1292                         *status = -EIO;
1293                 }
1294         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1295                 if (!imp->imp_deactive)
1296                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1297                 *status = -ESHUTDOWN; /* b=12940 */
1298         } else if (req->rq_import_generation != imp->imp_generation) {
1299                 DEBUG_REQ(req->rq_no_resend ? D_INFO : D_ERROR,
1300                           req, "req wrong generation:");
1301                 *status = -EIO;
1302         } else if (req->rq_send_state != imp->imp_state) {
1303                 /* invalidate in progress - any requests should be dropped */
1304                 if (atomic_read(&imp->imp_inval_count) != 0) {
1305                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1306                         *status = -EIO;
1307                 } else if (req->rq_no_delay &&
1308                            imp->imp_generation != imp->imp_initiated_at) {
1309                         /* ignore nodelay for requests initiating connections */
1310                         *status = -EAGAIN;
1311                 } else if (req->rq_allow_replay &&
1312                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1313                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1314                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1315                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1316                         DEBUG_REQ(D_HA, req, "allow during recovery");
1317                 } else {
1318                         delay = 1;
1319                 }
1320         }
1321
1322         RETURN(delay);
1323 }
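/*
 * Illustrative sketch of how the tri-state result above is consumed (the
 * real callers appear later in this file): a delayed request stays queued,
 * a non-zero *status fails it, and delay == 0 with *status == 0 means it
 * can be sent now.  Note that imp->imp_lock must be held around the call;
 * example_try_send() is a hypothetical wrapper.
 */
#if 0
static int example_try_send(struct obd_import *imp, struct ptlrpc_request *req)
{
	int status = 0;

	/* caller holds imp->imp_lock */
	if (ptlrpc_import_delay_req(imp, req, &status))
		return -EAGAIN;		/* leave queued, retry later */
	if (status != 0)
		return status;		/* fail the request */

	return 0;			/* OK to send now */
}
#endif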
1324
1325 /**
1326  * Decide if the error message should be printed to the console or not.
1327  * Makes its decision based on request type, status, and failure frequency.
1328  *
1329  * \param[in] req  request that failed and may need a console message
1330  *
1331  * \retval false if no message should be printed
1332  * \retval true  if console message should be printed
1333  */
1334 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1335 {
1336         LASSERT(req->rq_reqmsg != NULL);
1337
1338         /* Suppress particular reconnect errors which are to be expected. */
1339         if (opc == OST_CONNECT || opc == OST_DISCONNECT ||
1340             opc == MDS_CONNECT || opc == MDS_DISCONNECT ||
1341             opc == MGS_CONNECT || opc == MGS_DISCONNECT) {
1342                 /* Suppress timed out reconnect/disconnect requests */
1343                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1344                     req->rq_timedout)
1345                         return false;
1346
1347                 /*
1348                  * Suppress most unavailable/again reconnect requests, but
1349                  * print occasionally so it is clear client is trying to
1350                  * connect to a server where no target is running.
1351                  */
1352                 if ((err == -ENODEV || err == -EAGAIN) &&
1353                     req->rq_import->imp_conn_cnt % 30 != 20)
1354                         return false;
1355         }
1356
1357         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1358                 /* -EAGAIN is normal when using POSIX flocks */
1359                 return false;
1360
1361         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1362             (req->rq_xid & 0xf) != 10)
1363                 /* Suppress most ping requests, they may fail occasionally */
1364                 return false;
1365
1366         return true;
1367 }
1368
1369 /**
1370  * Check request processing status.
1371  * Returns the status.
1372  */
1373 static int ptlrpc_check_status(struct ptlrpc_request *req)
1374 {
1375         int rc;
1376
1377         ENTRY;
1378         rc = lustre_msg_get_status(req->rq_repmsg);
1379         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1380                 struct obd_import *imp = req->rq_import;
1381                 struct lnet_nid *nid = &imp->imp_connection->c_peer.nid;
1382                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1383
1384                 if (ptlrpc_console_allow(req, opc, rc))
1385                         LCONSOLE_ERROR_MSG(0x11,
1386                                            "%s: operation %s to node %s failed: rc = %d\n",
1387                                            imp->imp_obd->obd_name,
1388                                            ll_opcode2str(opc),
1389                                            libcfs_nidstr(nid), rc);
1390                 RETURN(rc < 0 ? rc : -EINVAL);
1391         }
1392
1393         if (rc)
1394                 DEBUG_REQ(D_INFO, req, "check status: rc = %d", rc);
1395
1396         RETURN(rc);
1397 }
1398
1399 /**
1400  * Save pre-versions of objects into the request for replay.
1401  * Versions are obtained from the server reply.
1402  * Used for VBR (version based recovery).
1403  */
1404 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1405 {
1406         struct lustre_msg *repmsg = req->rq_repmsg;
1407         struct lustre_msg *reqmsg = req->rq_reqmsg;
1408         __u64 *versions = lustre_msg_get_versions(repmsg);
1409
1410         ENTRY;
1411         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1412                 return;
1413
1414         LASSERT(versions);
1415         lustre_msg_set_versions(reqmsg, versions);
1416         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1417                versions[0], versions[1]);
1418
1419         EXIT;
1420 }
1421
1422 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1423 {
1424         struct ptlrpc_request *req;
1425
1426         assert_spin_locked(&imp->imp_lock);
1427         if (list_empty(&imp->imp_unreplied_list))
1428                 return 0;
1429
1430         req = list_first_entry(&imp->imp_unreplied_list, struct ptlrpc_request,
1431                                rq_unreplied_list);
1432         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1433
1434         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1435                 imp->imp_known_replied_xid = req->rq_xid - 1;
1436
1437         return req->rq_xid - 1;
1438 }
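/*
 * Worked example (illustrative only): if the unreplied list currently holds
 * xids 17, 19 and 20, the list head is 17, so the largest xid known to be
 * fully replied is 16; the function returns 16 and also raises
 * imp_known_replied_xid to at least that value.
 */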
1439
1440 /**
1441  * Callback function called when client receives RPC reply for \a req.
1442  * Returns 0 on success or error code.
1443  * The return value is assigned to req->rq_status by the caller
1444  * as request processing status.
1445  * This function also decides if the request needs to be saved for later replay.
1446  */
1447 static int after_reply(struct ptlrpc_request *req)
1448 {
1449         struct obd_import *imp = req->rq_import;
1450         struct obd_device *obd = req->rq_import->imp_obd;
1451         ktime_t work_start;
1452         u64 committed;
1453         s64 timediff;
1454         int rc;
1455
1456         ENTRY;
1457         LASSERT(obd != NULL);
1458         /* repbuf must be unlinked */
1459         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1460
1461         if (req->rq_reply_truncated) {
1462                 if (ptlrpc_no_resend(req)) {
1463                         DEBUG_REQ(D_ERROR, req,
1464                                   "reply buffer overflow, expected=%d, actual size=%d",
1465                                   req->rq_nob_received, req->rq_repbuf_len);
1466                         RETURN(-EOVERFLOW);
1467                 }
1468
1469                 sptlrpc_cli_free_repbuf(req);
1470                 /*
1471                  * Pass the required reply buffer size (include
1472                  * space for early reply).
1473                  * NB: no need to roundup because alloc_repbuf
1474                  * will roundup it
1475                  * will round it up
1476                 req->rq_replen = req->rq_nob_received;
1477                 req->rq_nob_received = 0;
1478                 spin_lock(&req->rq_lock);
1479                 req->rq_resend       = 1;
1480                 spin_unlock(&req->rq_lock);
1481                 RETURN(0);
1482         }
1483
1484         work_start = ktime_get_real();
1485         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1486
1487         /*
1488          * NB Until this point, the whole of the incoming message,
1489          * including buflens, status etc is in the sender's byte order.
1490          */
1491         rc = sptlrpc_cli_unwrap_reply(req);
1492         if (rc) {
1493                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed: rc = %d", rc);
1494                 RETURN(rc);
1495         }
1496
1497         /*
1498          * Security layer unwrap might ask to resend this request.
1499          */
1500         if (req->rq_resend)
1501                 RETURN(0);
1502
1503         rc = unpack_reply(req);
1504         if (rc)
1505                 RETURN(rc);
1506
1507         /* retry indefinitely on EINPROGRESS */
1508         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1509             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1510                 time64_t now = ktime_get_real_seconds();
1511
1512                 DEBUG_REQ((req->rq_nr_resend % 8 == 1 ? D_WARNING : 0) |
1513                           D_RPCTRACE, req, "resending request on EINPROGRESS");
1514                 spin_lock(&req->rq_lock);
1515                 req->rq_resend = 1;
1516                 spin_unlock(&req->rq_lock);
1517                 req->rq_nr_resend++;
1518
1519                 /* Readjust the timeout for current conditions */
1520                 ptlrpc_at_set_req_timeout(req);
1521                 /*
1522                  * delay the resend to give the server a chance to get ready.
1523                  * The delay is increased by 1s on every resend and is capped at
1524                  * the current request timeout (i.e. obd_timeout if AT is off,
1525                  * or AT service time x 125% + 5s, see at_est2timeout)
1526                  */
1527                 if (req->rq_nr_resend > req->rq_timeout)
1528                         req->rq_sent = now + req->rq_timeout;
1529                 else
1530                         req->rq_sent = now + req->rq_nr_resend;
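                     /*
                      * For example, with rq_timeout = 30s the 5th resend waits 5s,
                      * and every resend after the 30th waits the full 30s.
                      */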
1531
1532                 /* Resend for EINPROGRESS will use a new XID */
1533                 spin_lock(&imp->imp_lock);
1534                 list_del_init(&req->rq_unreplied_list);
1535                 spin_unlock(&imp->imp_lock);
1536
1537                 RETURN(0);
1538         }
1539
1540         if (obd->obd_svc_stats) {
1541                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1542                                     timediff);
1543                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1544         }
1545
1546         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1547             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1548                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1549                           lustre_msg_get_type(req->rq_repmsg));
1550                 RETURN(-EPROTO);
1551         }
1552
1553         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1554                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
1555         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1556         ptlrpc_at_adj_net_latency(req,
1557                                   lustre_msg_get_service_timeout(req->rq_repmsg));
1558
1559         rc = ptlrpc_check_status(req);
1560
1561         if (rc) {
1562                 /*
1563                  * Either we've been evicted, or the server has failed for
1564                  * some reason. Try to reconnect, and if that fails, punt to
1565                  * the upcall.
1566                  */
1567                 if (ptlrpc_recoverable_error(rc)) {
1568                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1569                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1570                                 RETURN(rc);
1571                         }
1572                         ptlrpc_request_handle_notconn(req);
1573                         RETURN(rc);
1574                 }
1575         } else {
1576                 /*
1577                  * Check whether the server sent an SLV (server lock volume).
1578                  * Do it only for RPCs with rc == 0.
1579                  */
1580                 ldlm_cli_update_pool(req);
1581         }
1582
1583         /*
1584          * Store transno in reqmsg for replay.
1585          */
1586         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1587                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1588                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1589         }
1590
1591         if (lustre_msg_get_transno(req->rq_repmsg) ||
1592             lustre_msg_get_opc(req->rq_reqmsg) == LDLM_ENQUEUE)
1593                 imp->imp_no_cached_data = 0;
1594
1595         if (imp->imp_replayable) {
1596                 /* if other threads are waiting for ptlrpc_free_committed()
1597                  * they could continue the work of freeing RPCs. That reduces
1598                  * lock hold times, and distributes work more fairly across
1599                  * waiting threads.  We can't use spin_is_contended() since
1600                  * there are many other places where imp_lock is held.
1601                  */
1602                 atomic_inc(&imp->imp_waiting);
1603                 spin_lock(&imp->imp_lock);
1604                 atomic_dec(&imp->imp_waiting);
1605                 /*
1606                  * No point in adding already-committed requests to the replay
1607                  * list, we will just remove them immediately. b=9829
1608                  */
1609                 if (req->rq_transno != 0 &&
1610                     (req->rq_transno >
1611                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1612                      req->rq_replay)) {
1613                         /** version recovery */
1614                         ptlrpc_save_versions(req);
1615                         ptlrpc_retain_replayable_request(req, imp);
1616                 } else if (req->rq_commit_cb &&
1617                            list_empty(&req->rq_replay_list)) {
1618                         /*
1619                          * NB: don't call rq_commit_cb if it's already on
1620                          * rq_replay_list, ptlrpc_free_committed() will call
1621                          * it later, see LU-3618 for details
1622                          */
1623                         spin_unlock(&imp->imp_lock);
1624                         req->rq_commit_cb(req);
1625                         atomic_inc(&imp->imp_waiting);
1626                         spin_lock(&imp->imp_lock);
1627                         atomic_dec(&imp->imp_waiting);
1628                 }
1629
1630                 /*
1631                  * Replay-enabled imports return commit-status information.
1632                  */
1633                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1634                 if (likely(committed > imp->imp_peer_committed_transno))
1635                         imp->imp_peer_committed_transno = committed;
1636
1637                 ptlrpc_free_committed(imp);
1638
1639                 if (!list_empty(&imp->imp_replay_list)) {
1640                         struct ptlrpc_request *last;
1641
1642                         last = list_entry(imp->imp_replay_list.prev,
1643                                           struct ptlrpc_request,
1644                                           rq_replay_list);
1645                         /*
1646                          * Requests with rq_replay stay on the list even if no
1647                          * commit is expected.
1648                          */
1649                         if (last->rq_transno > imp->imp_peer_committed_transno)
1650                                 ptlrpc_pinger_commit_expected(imp);
1651                 }
1652
1653                 spin_unlock(&imp->imp_lock);
1654         }
1655
1656         RETURN(rc);
1657 }
1658
1659 /**
1660  * Helper function to send request \a req over the network for the first time.
1661  * It also adjusts the request phase.
1662  * Returns 0 on success or error code.
1663  */
1664 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1665 {
1666         struct obd_import *imp = req->rq_import;
1667         __u64 min_xid = 0;
1668         int rc;
1669
1670         ENTRY;
1671         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1672
1673         /* do not try to go further if there is not enough memory in pool */
1674         if (req->rq_sent && req->rq_bulk)
1675                 if (req->rq_bulk->bd_iov_count >
1676                     sptlrpc_pool_get_free_pages(0) &&
1677                     pool_is_at_full_capacity(0))
1678                         RETURN(-ENOMEM);
1679
1680         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1681             (!req->rq_generation_set ||
1682              req->rq_import_generation == imp->imp_generation))
1683                 RETURN(0);
1684
1685         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1686
1687         spin_lock(&imp->imp_lock);
1688
1689         LASSERT(req->rq_xid != 0);
1690         LASSERT(!list_empty(&req->rq_unreplied_list));
1691
1692         if (!req->rq_generation_set)
1693                 req->rq_import_generation = imp->imp_generation;
1694
1695         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1696                 spin_lock(&req->rq_lock);
1697                 req->rq_waiting = 1;
1698                 spin_unlock(&req->rq_lock);
1699
1700                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1701                           ptlrpc_import_state_name(req->rq_send_state),
1702                           ptlrpc_import_state_name(imp->imp_state));
1703                 LASSERT(list_empty(&req->rq_list));
1704                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1705                 atomic_inc(&req->rq_import->imp_inflight);
1706                 spin_unlock(&imp->imp_lock);
1707                 RETURN(0);
1708         }
1709
1710         if (rc != 0) {
1711                 spin_unlock(&imp->imp_lock);
1712                 req->rq_status = rc;
1713                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1714                 RETURN(rc);
1715         }
1716
1717         LASSERT(list_empty(&req->rq_list));
1718         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1719         atomic_inc(&req->rq_import->imp_inflight);
1720
1721         /*
1722          * find the known replied XID from the unreplied list, CONNECT
1723          * and DISCONNECT requests are skipped to make the sanity check
1724          * on server side happy. see process_req_last_xid().
1725          *
1726                  * For CONNECT: Because replay requests have lower XIDs, it would
1727                  * break the sanity check if CONNECT bumped the exp_last_xid on
1728                  * the server.
1729                  *
1730                  * For DISCONNECT: Since the client aborts inflight RPCs before
1731                  * sending DISCONNECT, DISCONNECT may carry an XID higher than
1732                  * those of the inflight RPCs.
1733          */
1734         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1735                 min_xid = ptlrpc_known_replied_xid(imp);
1736         spin_unlock(&imp->imp_lock);
1737
1738         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1739
1740         lustre_msg_set_status(req->rq_reqmsg, current->pid);
1741
1742         /* If the request to be sent is an LDLM callback, do not try to
1743          * refresh context.
1744          * An LDLM callback is sent by a server to a client in order to make
1745          * it release a lock, on a communication channel that uses a reverse
1746          * context. It cannot be refreshed on its own, as it is the 'reverse'
1747          * (server-side) representation of a client context.
1748          * We do not care if the reverse context is expired, and want to send
1749          * the LDLM callback anyway. Once the client receives the AST, it is
1750          * its job to refresh its own context if it has expired, hence
1751          * refreshing the associated reverse context on server side, before
1752          * being able to send the LDLM_CANCEL requested by the server.
1753          */
1754         if (lustre_msg_get_opc(req->rq_reqmsg) != LDLM_BL_CALLBACK &&
1755             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_CP_CALLBACK &&
1756             lustre_msg_get_opc(req->rq_reqmsg) != LDLM_GL_CALLBACK)
1757                 rc = sptlrpc_req_refresh_ctx(req, 0);
1758         if (rc) {
1759                 if (req->rq_err) {
1760                         req->rq_status = rc;
1761                         RETURN(1);
1762                 } else {
1763                         spin_lock(&req->rq_lock);
1764                         req->rq_wait_ctx = 1;
1765                         spin_unlock(&req->rq_lock);
1766                         RETURN(0);
1767                 }
1768         }
1769
1770         CDEBUG(D_RPCTRACE,
1771                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1772                req, current->comm,
1773                imp->imp_obd->obd_uuid.uuid,
1774                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1775                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1776                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1777
1778         rc = ptl_send_rpc(req, 0);
1779         if (rc == -ENOMEM) {
1780                 spin_lock(&imp->imp_lock);
1781                 if (!list_empty(&req->rq_list)) {
1782                         list_del_init(&req->rq_list);
1783                         if (atomic_dec_and_test(&req->rq_import->imp_inflight))
1784                                 wake_up(&req->rq_import->imp_recovery_waitq);
1785                 }
1786                 spin_unlock(&imp->imp_lock);
1787                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1788                 RETURN(rc);
1789         }
1790         if (rc) {
1791                 DEBUG_REQ(D_HA, req, "send failed, expect timeout: rc = %d",
1792                           rc);
1793                 spin_lock(&req->rq_lock);
1794                 req->rq_net_err = 1;
1795                 spin_unlock(&req->rq_lock);
1796                 RETURN(rc);
1797         }
1798         RETURN(0);
1799 }
1800
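     /**
      * Ask the set's producer callback to queue additional requests until
      * the set has set_max_inflight RPCs in flight.  Returns the number of
      * requests added since entry, or 0 once the producer reports -ENOENT
      * (i.e. it has no more requests to produce).
      */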
1801 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1802 {
1803         int remaining, rc;
1804
1805         ENTRY;
1806         LASSERT(set->set_producer != NULL);
1807
1808         remaining = atomic_read(&set->set_remaining);
1809
1810         /*
1811          * populate the ->set_requests list with requests until we
1812          * reach the maximum number of RPCs in flight for this set
1813          */
1814         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1815                 rc = set->set_producer(set, set->set_producer_arg);
1816                 if (rc == -ENOENT) {
1817                         /* no more RPC to produce */
1818                         set->set_producer     = NULL;
1819                         set->set_producer_arg = NULL;
1820                         RETURN(0);
1821                 }
1822         }
1823
1824         RETURN((atomic_read(&set->set_remaining) - remaining));
1825 }
1826
1827 /**
1828  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1829  * and no more replies are expected.
1830  * (It is possible to get fewer replies than requests sent, e.g. due to
1831  * timed-out requests or requests that we had trouble sending out.)
1832  *
1833  * NOTE: This function contains a potential schedule point (cond_resched()).
1834  */
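     /*
      * Roughly, each request in the set is advanced through its phases here:
      *   NEW -> RPC -> (UNREG_RPC / UNREG_BULK while buffers are unlinked)
      *       -> BULK (only for requests with a bulk descriptor)
      *       -> INTERPRET -> COMPLETE
      */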
1835 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1836 {
1837         struct ptlrpc_request *req, *next;
1838         LIST_HEAD(comp_reqs);
1839         int force_timer_recalc = 0;
1840
1841         ENTRY;
1842         if (atomic_read(&set->set_remaining) == 0)
1843                 RETURN(1);
1844
1845         list_for_each_entry_safe(req, next, &set->set_requests,
1846                                  rq_set_chain) {
1847                 struct obd_import *imp = req->rq_import;
1848                 int unregistered = 0;
1849                 int async = 1;
1850                 int rc = 0;
1851
1852                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1853                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1854                         continue;
1855                 }
1856
1857                 /*
1858                  * This schedule point is mainly for the ptlrpcd caller of this
1859                  * function.  Most ptlrpc sets are neither long-lived nor
1860                  * unbounded in length, but the set used by the ptlrpcd is.
1861                  * Since the processing time is unbounded, we need to insert an
1862                  * explicit schedule point to make the thread well-behaved.
1863                  */
1864                 cond_resched();
1865
1866                 /*
1867                  * If the caller allows this request to be interrupted and it
1868                  * has in fact been interrupted, then move the request to the
1869                  * RQ_PHASE_INTERPRET phase regardless of what the current
1870                  * phase is.
1871                  */
1872                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1873                         req->rq_status = -EINTR;
1874                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1875
1876                         /*
1877                          * Since it is being interpreted and we have to wait
1878                          * for the reply to be unlinked, use sync mode.
1879                          */
1880                         async = 0;
1881
1882                         GOTO(interpret, req->rq_status);
1883                 }
1884
1885                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1886                         force_timer_recalc = 1;
1887
1888                 /* delayed send - skip */
1889                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1890                         continue;
1891
1892                 /* delayed resend - skip */
1893                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1894                     req->rq_sent > ktime_get_real_seconds())
1895                         continue;
1896
1897                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1898                       req->rq_phase == RQ_PHASE_BULK ||
1899                       req->rq_phase == RQ_PHASE_INTERPRET ||
1900                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1901                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1902                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1903                         LBUG();
1904                 }
1905
1906                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1907                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1908                         LASSERT(req->rq_next_phase != req->rq_phase);
1909                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1910
1911                         if (req->rq_req_deadline &&
1912                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1913                                 req->rq_req_deadline = 0;
1914                         if (req->rq_reply_deadline &&
1915                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1916                                 req->rq_reply_deadline = 0;
1917                         if (req->rq_bulk_deadline &&
1918                             !CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1919                                 req->rq_bulk_deadline = 0;
1920
1921                         /*
1922                          * Skip processing until the reply is unlinked. We
1923                          * can't return to the pool before that and we can't
1924                          * call interpret before that. We need to make sure
1925                          * that all RDMA transfers have finished and will
1926                          * not corrupt any data.
1927                          */
1928                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1929                             ptlrpc_cli_wait_unlink(req))
1930                                 continue;
1931                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1932                             ptlrpc_client_bulk_active(req))
1933                                 continue;
1934
1935                         /*
1936                          * Turn fail_loc off to prevent it from looping
1937                          * forever.
1938                          */
1939                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1940                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1941                                                      CFS_FAIL_ONCE);
1942                         }
1943                         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1944                                 CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1945                                                      CFS_FAIL_ONCE);
1946                         }
1947
1948                         /*
1949                          * Move to next phase if reply was successfully
1950                          * unlinked.
1951                          */
1952                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1953                 }
1954
1955                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1956                         GOTO(interpret, req->rq_status);
1957
1958                 /*
1959                  * Note that this will also start the async reply unlink.
1960                  */
1961                 if (req->rq_net_err && !req->rq_timedout) {
1962                         ptlrpc_expire_one_request(req, 1);
1963
1964                         /*
1965                          * Check if we still need to wait for unlink.
1966                          */
1967                         if (ptlrpc_cli_wait_unlink(req) ||
1968                             ptlrpc_client_bulk_active(req))
1969                                 continue;
1970                         /* If there is no need to resend, fail it now. */
1971                         if (req->rq_no_resend) {
1972                                 if (req->rq_status == 0)
1973                                         req->rq_status = -EIO;
1974                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1975                                 GOTO(interpret, req->rq_status);
1976                         } else {
1977                                 continue;
1978                         }
1979                 }
1980
1981                 if (req->rq_err) {
1982                         if (!ptlrpc_unregister_reply(req, 1)) {
1983                                 ptlrpc_unregister_bulk(req, 1);
1984                                 continue;
1985                         }
1986
1987                         spin_lock(&req->rq_lock);
1988                         req->rq_replied = 0;
1989                         spin_unlock(&req->rq_lock);
1990                         if (req->rq_status == 0)
1991                                 req->rq_status = -EIO;
1992                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1993                         GOTO(interpret, req->rq_status);
1994                 }
1995
1996                 /*
1997                  * ptlrpc_set_wait uses l_wait_event_abortable_timeout()
1998                  * so it sets rq_intr regardless of individual rpc
1999                  * timeouts. The synchronous IO waiting path sets
2000                  * rq_intr irrespective of whether ptlrpcd
2001                  * has seen a timeout.  Our policy is to only interpret
2002                  * interrupted rpcs after they have timed out, so we
2003                  * need to enforce that here.
2004                  */
2005
2006                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
2007                                      req->rq_wait_ctx)) {
2008                         req->rq_status = -EINTR;
2009                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2010                         GOTO(interpret, req->rq_status);
2011                 }
2012
2013                 if (req->rq_phase == RQ_PHASE_RPC) {
2014                         if (req->rq_timedout || req->rq_resend ||
2015                             req->rq_waiting || req->rq_wait_ctx) {
2016                                 int status;
2017
2018                                 if (!ptlrpc_unregister_reply(req, 1)) {
2019                                         ptlrpc_unregister_bulk(req, 1);
2020                                         continue;
2021                                 }
2022
2023                                 spin_lock(&imp->imp_lock);
2024                                 if (ptlrpc_import_delay_req(imp, req,
2025                                                             &status)) {
2026                                         /*
2027                                          * put on the delay list - waiting for
2028                                          * recovery to finish - before sending
2029                                          */
2030                                         list_move_tail(&req->rq_list,
2031                                                        &imp->imp_delayed_list);
2032                                         spin_unlock(&imp->imp_lock);
2033                                         continue;
2034                                 }
2035
2036                                 if (status != 0)  {
2037                                         req->rq_status = status;
2038                                         ptlrpc_rqphase_move(req,
2039                                                             RQ_PHASE_INTERPRET);
2040                                         spin_unlock(&imp->imp_lock);
2041                                         GOTO(interpret, req->rq_status);
2042                                 }
2043                                 /* ignore on just initiated connections */
2044                                 if (ptlrpc_no_resend(req) &&
2045                                     !req->rq_wait_ctx &&
2046                                     imp->imp_generation !=
2047                                     imp->imp_initiated_at) {
2048                                         req->rq_status = -ENOTCONN;
2049                                         ptlrpc_rqphase_move(req,
2050                                                             RQ_PHASE_INTERPRET);
2051                                         spin_unlock(&imp->imp_lock);
2052                                         GOTO(interpret, req->rq_status);
2053                                 }
2054
2055                                 /* don't resend too fast in case of network
2056                                  * errors.
2057                                  */
2058                                 if (ktime_get_real_seconds() < (req->rq_sent + 1)
2059                                     && req->rq_net_err && req->rq_timedout) {
2060
2061                                         DEBUG_REQ(D_INFO, req,
2062                                                   "throttle request");
2063                                         /* Don't try to resend the RPC right
2064                                          * away, as it is likely to fail again
2065                                          * and ptlrpc_check_set() will be
2066                                          * called again, keeping this thread
2067                                          * busy. Instead, wait for the next
2068                                          * timeout. Flag it as a resend to
2069                                          * ensure we don't wait too long.
2070                                          */
2071                                         req->rq_resend = 1;
2072                                         spin_unlock(&imp->imp_lock);
2073                                         continue;
2074                                 }
2075
2076                                 list_move_tail(&req->rq_list,
2077                                                &imp->imp_sending_list);
2078
2079                                 spin_unlock(&imp->imp_lock);
2080
2081                                 spin_lock(&req->rq_lock);
2082                                 req->rq_waiting = 0;
2083                                 spin_unlock(&req->rq_lock);
2084
2085                                 if (req->rq_timedout || req->rq_resend) {
2086                                         /*
2087                                          * This is being re-sent anyway,
2088                                          * so mark the req as a resend.
2089                                          */
2090                                         spin_lock(&req->rq_lock);
2091                                         req->rq_resend = 1;
2092                                         spin_unlock(&req->rq_lock);
2093                                 }
2094                                 /*
2095                                  * rq_wait_ctx is only touched by ptlrpcd,
2096                                  * so no lock is needed here.
2097                                  */
2098                                 status = sptlrpc_req_refresh_ctx(req, 0);
2099                                 if (status) {
2100                                         if (req->rq_err) {
2101                                                 req->rq_status = status;
2102                                                 spin_lock(&req->rq_lock);
2103                                                 req->rq_wait_ctx = 0;
2104                                                 spin_unlock(&req->rq_lock);
2105                                                 force_timer_recalc = 1;
2106                                         } else {
2107                                                 spin_lock(&req->rq_lock);
2108                                                 req->rq_wait_ctx = 1;
2109                                                 spin_unlock(&req->rq_lock);
2110                                         }
2111
2112                                         continue;
2113                                 } else {
2114                                         spin_lock(&req->rq_lock);
2115                                         req->rq_wait_ctx = 0;
2116                                         spin_unlock(&req->rq_lock);
2117                                 }
2118
2119                                 /*
2120                                  * In any case, the previous bulk should be
2121                                  * cleaned up to prepare for the new sending
2122                                  */
2123                                 if (req->rq_bulk &&
2124                                     !ptlrpc_unregister_bulk(req, 1))
2125                                         continue;
2126
2127                                 rc = ptl_send_rpc(req, 0);
2128                                 if (rc == -ENOMEM) {
2129                                         spin_lock(&imp->imp_lock);
2130                                         if (!list_empty(&req->rq_list))
2131                                                 list_del_init(&req->rq_list);
2132                                         spin_unlock(&imp->imp_lock);
2133                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2134                                         continue;
2135                                 }
2136                                 if (rc) {
2137                                         DEBUG_REQ(D_HA, req,
2138                                                   "send failed: rc = %d", rc);
2139                                         force_timer_recalc = 1;
2140                                         spin_lock(&req->rq_lock);
2141                                         req->rq_net_err = 1;
2142                                         spin_unlock(&req->rq_lock);
2143                                         continue;
2144                                 }
2145                                 /* need to reset the timeout */
2146                                 force_timer_recalc = 1;
2147                         }
2148
2149                         spin_lock(&req->rq_lock);
2150
2151                         if (ptlrpc_client_early(req)) {
2152                                 ptlrpc_at_recv_early_reply(req);
2153                                 spin_unlock(&req->rq_lock);
2154                                 continue;
2155                         }
2156
2157                         /* Still waiting for a reply? */
2158                         if (ptlrpc_client_recv(req)) {
2159                                 spin_unlock(&req->rq_lock);
2160                                 continue;
2161                         }
2162
2163                         /* Did we actually receive a reply? */
2164                         if (!ptlrpc_client_replied(req)) {
2165                                 spin_unlock(&req->rq_lock);
2166                                 continue;
2167                         }
2168
2169                         spin_unlock(&req->rq_lock);
2170
2171                         /*
2172                          * unlink from the net because we are going to
2173                          * swab the reply buffer in place
2174                          */
2175                         unregistered = ptlrpc_unregister_reply(req, 1);
2176                         if (!unregistered)
2177                                 continue;
2178
2179                         req->rq_status = after_reply(req);
2180                         if (req->rq_resend) {
2181                                 force_timer_recalc = 1;
2182                                 continue;
2183                         }
2184
2185                         /*
2186                          * If there is no bulk associated with this request,
2187                          * then we're done and should let the interpreter
2188                          * process the reply. Similarly if the RPC returned
2189                          * an error, and therefore the bulk will never arrive.
2190                          */
2191                         if (!req->rq_bulk || req->rq_status < 0) {
2192                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2193                                 GOTO(interpret, req->rq_status);
2194                         }
2195
2196                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2197                 }
2198
2199                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2200                 if (ptlrpc_client_bulk_active(req))
2201                         continue;
2202
2203                 if (req->rq_bulk->bd_failure) {
2204                         /*
2205                          * The RPC reply arrived OK, but the bulk screwed
2206                          * up!  Dead weird since the server told us the RPC
2207                          * was good after getting the REPLY for her GET or
2208                          * the ACK for her PUT.
2209                          */
2210                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed %d/%d/%d",
2211                                   req->rq_status,
2212                                   req->rq_bulk->bd_nob,
2213                                   req->rq_bulk->bd_nob_transferred);
2214                         req->rq_status = -EIO;
2215                 }
2216
2217                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2218
2219 interpret:
2220                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2221
2222                 /*
2223                  * This moves the request to the "unregistering" phase; we
2224                  * need to wait for the reply to be unlinked.
2225                  */
2226                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2227                         /* start async bulk unlink too */
2228                         ptlrpc_unregister_bulk(req, 1);
2229                         continue;
2230                 }
2231
2232                 if (!ptlrpc_unregister_bulk(req, async))
2233                         continue;
2234
2235                 /*
2236                  * By the time interpret is called, receiving must already
2237                  * be finished.
2238                  */
2239                 LASSERT(!req->rq_receiving_reply);
2240
2241                 ptlrpc_req_interpret(env, req, req->rq_status);
2242
2243                 if (ptlrpcd_check_work(req)) {
2244                         atomic_dec(&set->set_remaining);
2245                         continue;
2246                 }
2247                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2248
2249                 if (req->rq_reqmsg)
2250                         CDEBUG(D_RPCTRACE,
2251                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2252                                req, current->comm,
2253                                imp->imp_obd->obd_uuid.uuid,
2254                                lustre_msg_get_status(req->rq_reqmsg),
2255                                req->rq_xid,
2256                                obd_import_nid2str(imp),
2257                                lustre_msg_get_opc(req->rq_reqmsg),
2258                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2259
2260                 spin_lock(&imp->imp_lock);
2261                 /*
2262                  * The request may no longer be on the sending or delayed list.
2263                  * This can happen when it was marked erroneous because
2264                  * ptlrpc_import_delay_req(req, status) found it impossible to
2265                  * allow sending this rpc and returned *status != 0.
2266                  */
2267                 if (!list_empty(&req->rq_list)) {
2268                         list_del_init(&req->rq_list);
2269                         if (atomic_dec_and_test(&imp->imp_inflight))
2270                                 wake_up(&imp->imp_recovery_waitq);
2271                 }
2272                 list_del_init(&req->rq_unreplied_list);
2273                 spin_unlock(&imp->imp_lock);
2274
2275                 atomic_dec(&set->set_remaining);
2276                 wake_up(&imp->imp_recovery_waitq);
2277
2278                 if (set->set_producer) {
2279                         /* produce a new request if possible */
2280                         if (ptlrpc_set_producer(set) > 0)
2281                                 force_timer_recalc = 1;
2282
2283                         /*
2284                          * free the request that has just been completed
2285                          * in order not to pollute set->set_requests
2286                          */
2287                         list_del_init(&req->rq_set_chain);
2288                         spin_lock(&req->rq_lock);
2289                         req->rq_set = NULL;
2290                         req->rq_invalid_rqset = 0;
2291                         spin_unlock(&req->rq_lock);
2292
2293                         /* record rq_status to compute the final status later */
2294                         if (req->rq_status != 0)
2295                                 set->set_rc = req->rq_status;
2296                         ptlrpc_req_finished(req);
2297                 } else {
2298                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2299                 }
2300         }
2301
2302         /*
2303          * move completed requests to the head of the list so it's easier
2304          * for the caller to find them
2305          */
2306         list_splice(&comp_reqs, &set->set_requests);
2307
2308         /* If we hit an error, we want to recover promptly. */
2309         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2310 }
2311 EXPORT_SYMBOL(ptlrpc_check_set);
2312
2313 /**
2314  * Time out request \a req. If \a async_unlink is set, do not wait
2315  * until LNet actually confirms network buffer unlinking.
2316  * Return 1 if we should give up further retrying attempts or 0 otherwise.
2317  */
2318 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2319 {
2320         struct obd_import *imp = req->rq_import;
2321         unsigned int debug_mask = D_RPCTRACE;
2322         int rc = 0;
2323         __u32 opc;
2324
2325         ENTRY;
2326         spin_lock(&req->rq_lock);
2327         req->rq_timedout = 1;
2328         spin_unlock(&req->rq_lock);
2329
2330         opc = lustre_msg_get_opc(req->rq_reqmsg);
2331         if (ptlrpc_console_allow(req, opc,
2332                                  lustre_msg_get_status(req->rq_reqmsg)))
2333                 debug_mask = D_WARNING;
2334         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2335                   req->rq_net_err ? "failed due to network error" :
2336                      ((req->rq_real_sent == 0 ||
2337                        req->rq_real_sent < req->rq_sent ||
2338                        req->rq_real_sent >= req->rq_deadline) ?
2339                       "timed out for sent delay" : "timed out for slow reply"),
2340                   req->rq_sent, req->rq_real_sent);
2341
2342         if (imp && obd_debug_peer_on_timeout)
2343                 LNetDebugPeer(&imp->imp_connection->c_peer);
2344
2345         ptlrpc_unregister_reply(req, async_unlink);
2346         ptlrpc_unregister_bulk(req, async_unlink);
2347
2348         if (obd_dump_on_timeout)
2349                 libcfs_debug_dumplog();
2350
2351         if (!imp) {
2352                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2353                 RETURN(1);
2354         }
2355
2356         atomic_inc(&imp->imp_timeouts);
2357
2358         /* The DLM server doesn't want recovery run on its imports. */
2359         if (imp->imp_dlm_fake)
2360                 RETURN(1);
2361
2362         /*
2363          * If this request is for recovery or other primordial tasks,
2364          * then error it out here.
2365          */
2366         if (req->rq_ctx_init || req->rq_ctx_fini ||
2367             req->rq_send_state != LUSTRE_IMP_FULL ||
2368             imp->imp_obd->obd_no_recov) {
2369                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2370                           ptlrpc_import_state_name(req->rq_send_state),
2371                           ptlrpc_import_state_name(imp->imp_state));
2372                 spin_lock(&req->rq_lock);
2373                 req->rq_status = -ETIMEDOUT;
2374                 req->rq_err = 1;
2375                 spin_unlock(&req->rq_lock);
2376                 RETURN(1);
2377         }
2378
2379         /*
2380          * if a request can't be resent we can't wait for an answer after
2381          * the timeout
2382          */
2383         if (ptlrpc_no_resend(req)) {
2384                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2385                 rc = 1;
2386         }
2387
2388         if (opc != OBD_PING || req->rq_xid > imp->imp_highest_replied_xid)
2389                 ptlrpc_fail_import(imp,
2390                                    lustre_msg_get_conn_cnt(req->rq_reqmsg));
2391
2392         RETURN(rc);
2393 }
2394
2395 /**
2396  * Time out all uncompleted requests in the request set \a set.
2397  * This is called when a wait times out.
2398  */
2399 void ptlrpc_expired_set(struct ptlrpc_request_set *set)
2400 {
2401         struct ptlrpc_request *req;
2402         time64_t now = ktime_get_real_seconds();
2403
2404         ENTRY;
2405         LASSERT(set != NULL);
2406
2407         /*
2408          * A timeout expired. See which reqs it applies to...
2409          */
2410         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2411                 /* don't expire request waiting for context */
2412                 if (req->rq_wait_ctx)
2413                         continue;
2414
2415                 /* Request in-flight? */
2416                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2417                        !req->rq_waiting && !req->rq_resend) ||
2418                       (req->rq_phase == RQ_PHASE_BULK)))
2419                         continue;
2420
2421                 if (req->rq_timedout ||     /* already dealt with */
2422                     req->rq_deadline > now) /* not expired */
2423                         continue;
2424
2425                 /*
2426                  * Deal with this guy. Do it asynchronously to not block
2427                  * ptlrpcd thread.
2428                  */
2429                 ptlrpc_expire_one_request(req, 1);
2430                 /*
2431                  * Loops require that we resched once in a while to avoid
2432                  * RCU stalls and a few other problems.
2433                  */
2434                 cond_resched();
2435
2436         }
2437 }
2438
2439 /**
2440  * Interrupts (sets the interrupted flag on) all uncompleted requests in
2441  * the set \a set. This is called when a wait_event is interrupted
2442  * by a signal.
2443  */
2444 static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set)
2445 {
2446         struct ptlrpc_request *req;
2447
2448         LASSERT(set != NULL);
2449         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2450
2451         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2452                 if (req->rq_intr)
2453                         continue;
2454
2455                 if (req->rq_phase != RQ_PHASE_RPC &&
2456                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2457                     !req->rq_allow_intr)
2458                         continue;
2459
2460                 spin_lock(&req->rq_lock);
2461                 req->rq_intr = 1;
2462                 spin_unlock(&req->rq_lock);
2463         }
2464 }
2465
2466 /**
2467  * Get the smallest timeout in the set; this does NOT set a timeout.
      * Returns 0 if nothing is in flight, 1 if some request has already
      * expired, otherwise the number of seconds until the nearest deadline.
2468  */
2469 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2470 {
2471         time64_t now = ktime_get_real_seconds();
2472         int timeout = 0;
2473         struct ptlrpc_request *req;
2474         time64_t deadline;
2475
2476         ENTRY;
2477         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2478                 /* Request in-flight? */
2479                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2480                       (req->rq_phase == RQ_PHASE_BULK) ||
2481                       (req->rq_phase == RQ_PHASE_NEW)))
2482                         continue;
2483
2484                 /* Already timed out. */
2485                 if (req->rq_timedout)
2486                         continue;
2487
2488                 /* Waiting for ctx. */
2489                 if (req->rq_wait_ctx)
2490                         continue;
2491
2492                 if (req->rq_phase == RQ_PHASE_NEW)
2493                         deadline = req->rq_sent;
2494                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2495                         deadline = req->rq_sent;
2496                 else
2497                         deadline = req->rq_sent + req->rq_timeout;
2498
2499                 if (deadline <= now)    /* actually expired already */
2500                         timeout = 1;    /* ASAP */
2501                 else if (timeout == 0 || timeout > deadline - now)
2502                         timeout = deadline - now;
2503         }
2504         RETURN(timeout);
2505 }
2506
2507 /**
2508  * Send all unsent requests from the set and then wait until all
2509  * requests in the set complete (either get a reply, time out, get an
2510  * error, or otherwise are interrupted).
2511  * Returns 0 on success or an error code otherwise.
2512  */
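     /*
      * Minimal synchronous usage sketch (assuming the usual set helpers
      * declared elsewhere in ptlrpc):
      *
      *      set = ptlrpc_prep_set();
      *      if (set == NULL)
      *              return -ENOMEM;
      *      ptlrpc_set_add_req(set, req);
      *      rc = ptlrpc_set_wait(env, set);
      *      ptlrpc_set_destroy(set);
      */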
2513 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2514 {
2515         struct ptlrpc_request *req;
2516         time64_t timeout;
2517         int rc;
2518
2519         ENTRY;
2520         if (set->set_producer)
2521                 (void)ptlrpc_set_producer(set);
2522         else
2523                 list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2524                         if (req->rq_phase == RQ_PHASE_NEW)
2525                                 (void)ptlrpc_send_new_req(req);
2526                 }
2527
2528         if (list_empty(&set->set_requests))
2529                 RETURN(0);
2530
2531         do {
2532                 timeout = ptlrpc_set_next_timeout(set);
2533
2534                 /*
2535                  * wait until all complete, all are interrupted, or an
2536                  * in-flight req times out
2537                  */
2538                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2539                        set, timeout);
2540
2541                 if ((timeout == 0 && !signal_pending(current)) ||
2542                     set->set_allow_intr) {
2543                         /*
2544                          * No requests are in-flight (either timed out
2545                          * or delayed), so we can allow interrupts.
2546                          * We still want to block for a limited time,
2547                          * so we allow interrupts during the timeout.
2548                          */
2549                         rc = l_wait_event_abortable_timeout(
2550                                 set->set_waitq,
2551                                 ptlrpc_check_set(NULL, set),
2552                                 cfs_time_seconds(timeout ? timeout : 1));
2553                         if (rc == 0) {
2554                                 rc = -ETIMEDOUT;
2555                                 ptlrpc_expired_set(set);
2556                         } else if (rc < 0) {
2557                                 rc = -EINTR;
2558                                 ptlrpc_interrupted_set(set);
2559                         } else {
2560                                 rc = 0;
2561                         }
2562                 } else {
2563                         /*
2564                          * At least one request is in flight, so no
2565                          * interrupts are allowed. Wait until all
2566                          * complete, or an in-flight req times out.
2567                          */
2568                         rc = wait_event_idle_timeout(
2569                                 set->set_waitq,
2570                                 ptlrpc_check_set(NULL, set),
2571                                 cfs_time_seconds(timeout ? timeout : 1));
2572                         if (rc == 0) {
2573                                 ptlrpc_expired_set(set);
2574                                 rc = -ETIMEDOUT;
2575                         } else {
2576                                 rc = 0;
2577                         }
2578
2579                         /*
2580                          * LU-769 - if we ignored the signal because
2581                          * it was already pending when we started, we
2582                          * need to handle it now or we risk it being
2583                          * ignored forever
2584                          */
2585                         if (rc == -ETIMEDOUT &&
2586                             signal_pending(current)) {
2587                                 sigset_t old, new;
2588
2589                                 siginitset(&new, LUSTRE_FATAL_SIGS);
2590                                 sigprocmask(SIG_BLOCK, &new, &old);
2591                                 /*
2592                                  * In fact we only interrupt for the
2593                                  * "fatal" signals like SIGINT or
2594                                  * SIGKILL. We still ignore less
2595                                  * important signals since ptlrpc set
2596                                  * is not easily reentrant from
2597                                  * userspace again
2598                                  */
2599                                 if (signal_pending(current))
2600                                         ptlrpc_interrupted_set(set);
2601                                 sigprocmask(SIG_SETMASK, &old, NULL);
2602                         }
2603                 }
2604
2605                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2606
2607                 /*
2608                  * -EINTR => all requests have been flagged rq_intr so next
2609                  * check completes.
2610                  * -ETIMEDOUT => someone timed out.  When all reqs have
2611                  * timed out, signals are enabled allowing completion with
2612                  * EINTR.
2613                  * I don't really care if we go once more round the loop in
2614                  * the error cases -eeb.
2615                  */
2616                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2617                         list_for_each_entry(req, &set->set_requests,
2618                                             rq_set_chain) {
2619                                 spin_lock(&req->rq_lock);
2620                                 req->rq_invalid_rqset = 1;
2621                                 spin_unlock(&req->rq_lock);
2622                         }
2623                 }
2624         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2625
2626         LASSERT(atomic_read(&set->set_remaining) == 0);
2627
2628         rc = set->set_rc; /* rq_status of already freed requests if any */
2629         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
2630                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2631                 if (req->rq_status != 0)
2632                         rc = req->rq_status;
2633         }
2634
2635         RETURN(rc);
2636 }
2637 EXPORT_SYMBOL(ptlrpc_set_wait);
2638
2639 /**
2640  * Helper function for request freeing.
2641  * Called when request count reached zero and request needs to be freed.
2642  * Removes request from all sorts of sending/replay lists it might be on,
2643  * frees network buffers if any are present.
2644  * If \a locked is set, the caller is already holding the import imp_lock
2645  * and so we do not need to take it again (for certain list manipulations).
2646  */
2647 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2648 {
2649         ENTRY;
2650
2651         if (!request)
2652                 RETURN_EXIT;
2653
2654         LASSERT(!request->rq_srv_req);
2655         LASSERT(request->rq_export == NULL);
2656         LASSERTF(!request->rq_receiving_reply, "req %px\n", request);
2657         LASSERTF(list_empty(&request->rq_list), "req %px\n", request);
2658         LASSERTF(list_empty(&request->rq_set_chain), "req %px\n", request);
2659         LASSERTF(!request->rq_replay, "req %px\n", request);
2660
2661         req_capsule_fini(&request->rq_pill);
2662
2663         /*
2664          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2665          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2666          */
2667         if (request->rq_import) {
2668                 if (!locked)
2669                         spin_lock(&request->rq_import->imp_lock);
2670                 list_del_init(&request->rq_replay_list);
2671                 list_del_init(&request->rq_unreplied_list);
2672                 if (!locked)
2673                         spin_unlock(&request->rq_import->imp_lock);
2674         }
2675         LASSERTF(list_empty(&request->rq_replay_list), "req %px\n", request);
2676
2677         if (atomic_read(&request->rq_refcount) != 0) {
2678                 DEBUG_REQ(D_ERROR, request,
2679                           "freeing request with nonzero refcount");
2680                 LBUG();
2681         }
2682
2683         if (request->rq_repbuf)
2684                 sptlrpc_cli_free_repbuf(request);
2685
2686         if (request->rq_import) {
2687                 if (!ptlrpcd_check_work(request)) {
2688                         LASSERT(atomic_read(&request->rq_import->imp_reqs) > 0);
2689                         atomic_dec(&request->rq_import->imp_reqs);
2690                 }
2691                 class_import_put(request->rq_import);
2692                 request->rq_import = NULL;
2693         }
2694         if (request->rq_bulk)
2695                 ptlrpc_free_bulk(request->rq_bulk);
2696
2697         if (request->rq_reqbuf || request->rq_clrbuf)
2698                 sptlrpc_cli_free_reqbuf(request);
2699
2700         if (request->rq_cli_ctx)
2701                 sptlrpc_req_put_ctx(request, !locked);
2702
2703         if (request->rq_pool)
2704                 __ptlrpc_free_req_to_pool(request);
2705         else
2706                 ptlrpc_request_cache_free(request);
2707         EXIT;
2708 }
2709
2710 /**
2711  * Helper function.
2712  * Drops one reference count for request \a request.
2713  * \a locked set indicates that the caller holds the import imp_lock.
2714  * Frees the request when the reference count reaches zero.
2715  *
2716  * \retval 1    the request is freed
2717  * \retval 0    some others still hold references on the request
2718  */
2719 static int __ptlrpc_req_put(struct ptlrpc_request *request, int locked)
2720 {
2721         int count;
2722
2723         ENTRY;
2724         if (!request)
2725                 RETURN(1);
2726
2727         LASSERT(request != LP_POISON);
2728         LASSERT(request->rq_reqmsg != LP_POISON);
2729
2730         DEBUG_REQ(D_INFO, request, "refcount now %u",
2731                   atomic_read(&request->rq_refcount) - 1);
2732
2733         spin_lock(&request->rq_lock);
2734         count = atomic_dec_return(&request->rq_refcount);
2735         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2736
2737         /*
2738          * For an open RPC, the client does not know the EA size (LOV, ACL,
2739          * and so on) before the reply arrives, so it has to reserve a very
2740          * large reply buffer. Such a buffer will not be released until the
2741          * RPC is freed. Since the open RPC is replayable, we need to keep it
2742          * in the replay list until close. If there are a lot of files opened
2743          * concurrently, then the client may run out of memory.
2744          *
2745          * In fact, it is unnecessary to keep the reply buffer for open
2746          * replay; the related EAs have already been saved via mdc_save_lovea()
2747          * before coming here. So it is safe to free the reply buffer a bit
2748          * earlier, before releasing the RPC, to avoid client OOM. LU-9514
2749          */
2750         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2751                 spin_lock(&request->rq_early_free_lock);
2752                 sptlrpc_cli_free_repbuf(request);
2753                 request->rq_repbuf = NULL;
2754                 request->rq_repbuf_len = 0;
2755                 request->rq_repdata = NULL;
2756                 request->rq_reqdata_len = 0;
2757                 spin_unlock(&request->rq_early_free_lock);
2758         }
2759         spin_unlock(&request->rq_lock);
2760
2761         if (!count)
2762                 __ptlrpc_free_req(request, locked);
2763
2764         RETURN(!count);
2765 }
2766
2767 /**
2768  * Drop one request reference. Must be called with import imp_lock held.
2769  * When reference count drops to zero, request is freed.
2770  */
2771 void ptlrpc_req_put_with_imp_lock(struct ptlrpc_request *request)
2772 {
2773         assert_spin_locked(&request->rq_import->imp_lock);
2774         (void)__ptlrpc_req_put(request, 1);
2775 }
2776
2777 /**
2778  * Drops one reference count for a request.
2779  */
2780 void ptlrpc_req_put(struct ptlrpc_request *request)
2781 {
2782         __ptlrpc_req_put(request, 0);
2783 }
2784 EXPORT_SYMBOL(ptlrpc_req_put);
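/*
 * Reference-counting sketch (illustrative only): a temporary user pins a
 * request with ptlrpc_request_addref() and drops it with ptlrpc_req_put();
 * the _with_imp_lock variant is for callers that already hold imp_lock:
 *
 *         req = ptlrpc_request_addref(req);
 *         ... use req ...
 *         ptlrpc_req_put(req);               (may free on the last reference)
 *
 *         spin_lock(&imp->imp_lock);
 *         ... list manipulation under imp_lock ...
 *         ptlrpc_req_put_with_imp_lock(req);
 *         spin_unlock(&imp->imp_lock);
 */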
2785
2786
2787 /**
2788  * Returns xid of a \a request
2789  */
2790 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2791 {
2792         return request->rq_xid;
2793 }
2794 EXPORT_SYMBOL(ptlrpc_req_xid);
2795
2796 /**
2797  * Disengage the client's reply buffer from the network
2798  * NB does _NOT_ unregister any client-side bulk.
2799  * IDEMPOTENT, but _not_ safe against concurrent callers.
2800  * The request owner (i.e. the thread doing the I/O) must call...
2801  * Returns 1 when the reply is unlinked, 0 if the unlink is still pending (async).
2802  */
2803 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2804 {
2805         bool discard;
2806         /*
2807          * Might sleep.
2808          */
2809         LASSERT(!in_interrupt());
2810
2811         /* Let's setup deadline for reply unlink. */
2812         if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2813             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2814                 request->rq_reply_deadline = ktime_get_real_seconds() +
2815                                              PTLRPC_REQ_LONG_UNLINK;
2816
2817         /*
2818          * Nothing left to do.
2819          */
2820         if (!ptlrpc_cli_wait_unlink(request))
2821                 RETURN(1);
2822
2823         LNetMDUnlink(request->rq_reply_md_h);
2824
2825         spin_lock(&request->rq_lock);
2826         discard = request->rq_reply_unlinked && !request->rq_req_unlinked;
2827         spin_unlock(&request->rq_lock);
2828
2829         if (discard) /* Discard the request-out callback */
2830                 __LNetMDUnlink(request->rq_req_md_h, discard);
2831
2832         /*
2833          * Let's check it once again.
2834          */
2835         if (!ptlrpc_cli_wait_unlink(request))
2836                 RETURN(1);
2837
2838         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2839         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2840
2841         /*
2842          * Do not wait for unlink to finish.
2843          */
2844         if (async)
2845                 RETURN(0);
2846
2847         /*
2848          * We have to wait_event_idle_timeout() whatever the result, to get
2849          * a chance to run reply_in_callback(), and to make sure we've
2850          * unlinked before returning a req to the pool.
2851          */
2852         for (;;) {
2853                 wait_queue_head_t *wq = (request->rq_set) ?
2854                                         &request->rq_set->set_waitq :
2855                                         &request->rq_reply_waitq;
2856                 int seconds = PTLRPC_REQ_LONG_UNLINK;
2857                 /*
2858                  * Network access will complete in finite time but the HUGE
2859                  * timeout lets us CWARN for visibility of sluggish NALs
2860                  */
2861                 while (seconds > 0 &&
2862                        wait_event_idle_timeout(
2863                                *wq,
2864                                !ptlrpc_cli_wait_unlink(request),
2865                                cfs_time_seconds(1)) == 0)
2866                         seconds -= 1;
2867                 if (seconds > 0) {
2868                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2869                         RETURN(1);
2870                 }
2871
2872                 DEBUG_REQ(D_WARNING, request,
2873                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2874                           request->rq_receiving_reply,
2875                           request->rq_req_unlinked,
2876                           request->rq_reply_unlinked);
2877         }
2878         RETURN(0);
2879 }
2880
2881 static void ptlrpc_free_request(struct ptlrpc_request *req)
2882 {
2883         spin_lock(&req->rq_lock);
2884         req->rq_replay = 0;
2885         spin_unlock(&req->rq_lock);
2886
2887         if (req->rq_commit_cb)
2888                 req->rq_commit_cb(req);
2889         list_del_init(&req->rq_replay_list);
2890
2891         __ptlrpc_req_put(req, 1);
2892 }
2893
2894 /**
2895  * The request is committed and dropped from the replay list of its import.
2896  */
2897 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2898 {
2899         struct obd_import *imp = req->rq_import;
2900
2901         spin_lock(&imp->imp_lock);
2902         if (list_empty(&req->rq_replay_list)) {
2903                 spin_unlock(&imp->imp_lock);
2904                 return;
2905         }
2906
2907         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2908                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2909                         imp->imp_replay_cursor = req->rq_replay_list.next;
2910                 ptlrpc_free_request(req);
2911         }
2912
2913         spin_unlock(&imp->imp_lock);
2914 }
2915 EXPORT_SYMBOL(ptlrpc_request_committed);
2916
2917 /**
2918  * Iterates through the replay_list on the import and prunes
2919  * all requests that have a transno smaller than last_committed for the
2920  * import and do not have rq_replay set.
2921  * Since requests are sorted in transno order, it stops at the first
2922  * transno bigger than last_committed.
2923  * Caller must hold imp->imp_lock.
2924  */
2925 void ptlrpc_free_committed(struct obd_import *imp)
2926 {
2927         struct ptlrpc_request *req, *saved;
2928         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2929         bool skip_committed_list = true;
2930         unsigned int replay_scanned = 0, replay_freed = 0;
2931         unsigned int commit_scanned = 0, commit_freed = 0;
2932         unsigned int debug_level = D_INFO;
2933         __u64 peer_committed_transno;
2934         int imp_generation;
2935         time64_t start, now;
2936
2937         ENTRY;
2938         LASSERT(imp != NULL);
2939         assert_spin_locked(&imp->imp_lock);
2940
2941         start = ktime_get_seconds();
2942         /* save these here, we can potentially drop imp_lock after checking */
2943         peer_committed_transno = imp->imp_peer_committed_transno;
2944         imp_generation = imp->imp_generation;
2945
2946         if (peer_committed_transno == imp->imp_last_transno_checked &&
2947             imp_generation == imp->imp_last_generation_checked) {
2948                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2949                        imp->imp_obd->obd_name, peer_committed_transno);
2950                 RETURN_EXIT;
2951         }
2952         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2953                imp->imp_obd->obd_name, peer_committed_transno, imp_generation);
2954
2955         if (imp_generation != imp->imp_last_generation_checked ||
2956             imp->imp_last_transno_checked == 0)
2957                 skip_committed_list = false;
2958         /* maybe drop imp_lock here, if another lock protected the lists */
2959
2960         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2961                                  rq_replay_list) {
2962                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2963                 LASSERT(req != last_req);
2964                 last_req = req;
2965
2966                 if (req->rq_transno == 0) {
2967                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2968                         LBUG();
2969                 }
2970
2971                 /* If other threads are waiting on imp_lock, stop processing
2972                  * in this thread. Another thread can finish remaining work.
2973                  * This may happen if there are huge numbers of open files
2974                  * that are closed suddenly or evicted, or if the server
2975                  * commit interval is very high vs. RPC rate.
2976                  */
2977                 if (++replay_scanned % 2048 == 0) {
2978                         now = ktime_get_seconds();
2979                         if (now > start + 5)
2980                                 debug_level = D_WARNING;
2981
2982                         if ((replay_freed > 128 && now > start + 3) &&
2983                             atomic_read(&imp->imp_waiting)) {
2984                                 if (debug_level == D_INFO)
2985                                         debug_level = D_RPCTRACE;
2986                                 break;
2987                         }
2988                 }
2989
2990                 if (req->rq_import_generation < imp_generation) {
2991                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2992                         GOTO(free_req, 0);
2993                 }
2994
2995                 /* not yet committed */
2996                 if (req->rq_transno > peer_committed_transno) {
2997                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2998                         break;
2999                 }
3000
3001                 if (req->rq_replay) {
3002                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
3003                         list_move_tail(&req->rq_replay_list,
3004                                        &imp->imp_committed_list);
3005                         continue;
3006                 }
3007
3008                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
3009                           peer_committed_transno);
3010 free_req:
3011                 replay_freed++;
3012                 ptlrpc_free_request(req);
3013         }
3014
3015         if (skip_committed_list)
3016                 GOTO(out, 0);
3017
3018         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
3019                                  rq_replay_list) {
3020                 LASSERT(req->rq_transno != 0);
3021
3022                 /* If other threads are waiting on imp_lock, stop processing
3023                  * in this thread. Another thread can finish remaining work. */
3024                 if (++commit_scanned % 2048 == 0) {
3025                         now = ktime_get_seconds();
3026                         if (now > start + 6)
3027                                 debug_level = D_WARNING;
3028
3029                         if ((commit_freed > 128 && now > start + 4) &&
3030                             atomic_read(&imp->imp_waiting)) {
3031                                 if (debug_level == D_INFO)
3032                                         debug_level = D_RPCTRACE;
3033                                 break;
3034                         }
3035                 }
3036
3037                 if (req->rq_import_generation < imp_generation ||
3038                     !req->rq_replay) {
3039                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
3040                                   req->rq_import_generation <
3041                                   imp_generation ? "stale" : "closed");
3042
3043                         if (imp->imp_replay_cursor == &req->rq_replay_list)
3044                                 imp->imp_replay_cursor =
3045                                         req->rq_replay_list.next;
3046
3047                         commit_freed++;
3048                         ptlrpc_free_request(req);
3049                 }
3050         }
3051 out:
3052         /* if full lists were processed without interruption, avoid next scan */
3053         if (debug_level == D_INFO) {
3054                 imp->imp_last_transno_checked = peer_committed_transno;
3055                 imp->imp_last_generation_checked = imp_generation;
3056         }
3057
3058         CDEBUG_LIMIT(debug_level,
3059                      "%s: %s: skip=%u replay=%u/%u committed=%u/%u\n",
3060                      imp->imp_obd->obd_name,
3061                      debug_level == D_INFO ? "normal" : "overloaded",
3062                      skip_committed_list, replay_freed, replay_scanned,
3063                      commit_freed, commit_scanned);
3064         EXIT;
3065 }
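/*
 * Worked example for ptlrpc_free_committed() above (illustrative numbers),
 * assuming peer_committed_transno == 100 and the current import generation:
 * a non-replay request with transno 90 is freed; an open with transno 95
 * and rq_replay set is moved to imp_committed_list and kept for open
 * replay; the scan stops at the first request with transno 101, since the
 * replay list is sorted by transno.
 */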
3066
3067 void ptlrpc_cleanup_client(struct obd_import *imp)
3068 {
3069         ENTRY;
3070         EXIT;
3071 }
3072
3073 /**
3074  * Schedule a previously sent request for resend.
3075  * For bulk requests we assign a new xid (to avoid problems with
3076  * lost replies and therefore several transfers landing in the same buffer
3077  * from different sending attempts).
3078  */
3079 void ptlrpc_resend_req(struct ptlrpc_request *req)
3080 {
3081         DEBUG_REQ(D_HA, req, "going to resend");
3082         spin_lock(&req->rq_lock);
3083
3084         /*
3085          * Request got reply but linked to the import list still.
3086          * Let ptlrpc_check_set() process it.
3087          */
3088         if (ptlrpc_client_replied(req)) {
3089                 spin_unlock(&req->rq_lock);
3090                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
3091                 return;
3092         }
3093
3094         req->rq_status = -EAGAIN;
3095
3096         req->rq_resend = 1;
3097         req->rq_net_err = 0;
3098         req->rq_timedout = 0;
3099
3100         ptlrpc_client_wake_req(req);
3101         spin_unlock(&req->rq_lock);
3102 }
3103
3104 /* XXX: this function and rq_status are currently unused */
3105 void ptlrpc_restart_req(struct ptlrpc_request *req)
3106 {
3107         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
3108         req->rq_status = -ERESTARTSYS;
3109
3110         spin_lock(&req->rq_lock);
3111         req->rq_restart = 1;
3112         req->rq_timedout = 0;
3113         ptlrpc_client_wake_req(req);
3114         spin_unlock(&req->rq_lock);
3115 }
3116
3117 /**
3118  * Grab additional reference on a request \a req
3119  */
3120 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
3121 {
3122         ENTRY;
3123         atomic_inc(&req->rq_refcount);
3124         RETURN(req);
3125 }
3126 EXPORT_SYMBOL(ptlrpc_request_addref);
3127
3128 /**
3129  * Add a request to import replay_list.
3130  * Must be called under imp_lock
3131  */
3132 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
3133                                       struct obd_import *imp)
3134 {
3135         struct ptlrpc_request *iter;
3136
3137         assert_spin_locked(&imp->imp_lock);
3138
3139         if (req->rq_transno == 0) {
3140                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
3141                 LBUG();
3142         }
3143
3144         /*
3145          * clear this for new requests that were resent as well
3146          * as resent replayed requests.
3147          */
3148         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3149
3150         /* don't re-add requests that have been replayed */
3151         if (!list_empty(&req->rq_replay_list))
3152                 return;
3153
3154         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
3155
3156         spin_lock(&req->rq_lock);
3157         req->rq_resend = 0;
3158         spin_unlock(&req->rq_lock);
3159
3160         LASSERT(imp->imp_replayable);
3161         /* Balanced in ptlrpc_free_committed, usually. */
3162         ptlrpc_request_addref(req);
3163         list_for_each_entry_reverse(iter, &imp->imp_replay_list,
3164                                     rq_replay_list) {
3165                 /*
3166                  * We may have duplicate transnos if we create and then
3167                  * open a file, or for closes retained to match creating
3168                  * opens, so use req->rq_xid as a secondary key.
3169                  * (See bugs 684, 685, and 428.)
3170                  * XXX no longer needed, but all opens need transnos!
3171                  */
3172                 if (iter->rq_transno > req->rq_transno)
3173                         continue;
3174
3175                 if (iter->rq_transno == req->rq_transno) {
3176                         LASSERT(iter->rq_xid != req->rq_xid);
3177                         if (iter->rq_xid > req->rq_xid)
3178                                 continue;
3179                 }
3180
3181                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
3182                 return;
3183         }
3184
3185         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3186 }
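/*
 * Ordering example for ptlrpc_retain_replayable_request() above
 * (illustrative values): if the replay list already holds requests with
 * (transno, xid) of (5, 20) and (7, 31), a new request with (7, 30) is
 * inserted between them and a new (8, 40) is added at the tail, keeping
 * the list sorted by transno and then by xid.
 */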
3187
3188 /**
3189  * Send request and wait until it completes.
3190  * Returns request processing status.
3191  */
3192 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3193 {
3194         struct ptlrpc_request_set *set;
3195         int rc;
3196
3197         ENTRY;
3198         LASSERT(req->rq_set == NULL);
3199         LASSERT(!req->rq_receiving_reply);
3200
3201         set = ptlrpc_prep_set();
3202         if (!set) {
3203                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3204                 RETURN(-ENOMEM);
3205         }
3206
3207         /* for distributed debugging */
3208         lustre_msg_set_status(req->rq_reqmsg, current->pid);
3209
3210         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3211         ptlrpc_request_addref(req);
3212         ptlrpc_set_add_req(set, req);
3213         rc = ptlrpc_set_wait(NULL, set);
3214         ptlrpc_set_destroy(set);
3215
3216         RETURN(rc);
3217 }
3218 EXPORT_SYMBOL(ptlrpc_queue_wait);
3219
3220 /**
3221  * Callback used for processing replies to replayed requests.
3222  * In case of a successful reply, calls the registered request replay callback.
3223  * In case of error, restarts the replay process.
3224  */
3225 static int ptlrpc_replay_interpret(const struct lu_env *env,
3226                                    struct ptlrpc_request *req,
3227                                    void *args, int rc)
3228 {
3229         struct ptlrpc_replay_async_args *aa = args;
3230         struct obd_import *imp = req->rq_import;
3231
3232         ENTRY;
3233         atomic_dec(&imp->imp_replay_inflight);
3234
3235         /*
3236          * Note: if it is a bulk replay (MDS-MDS replay), then even if the
3237          * server got the request but the bulk transfer timed out, let's
3238          * replay the bulk req again
3239          */
3240         if (!ptlrpc_client_replied(req) ||
3241             (req->rq_bulk &&
3242              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3243                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3244                 GOTO(out, rc = -ETIMEDOUT);
3245         }
3246
3247         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3248             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3249             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3250                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3251
3252         /** VBR: check version failure */
3253         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3254                 /** replay was failed due to version mismatch */
3255                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3256                 spin_lock(&imp->imp_lock);
3257                 imp->imp_vbr_failed = 1;
3258                 spin_unlock(&imp->imp_lock);
3259                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3260         } else {
3261                 /** The transno had better not change over replay. */
3262                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3263                          lustre_msg_get_transno(req->rq_repmsg) ||
3264                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3265                          "%#llx/%#llx\n",
3266                          lustre_msg_get_transno(req->rq_reqmsg),
3267                          lustre_msg_get_transno(req->rq_repmsg));
3268         }
3269
3270         spin_lock(&imp->imp_lock);
3271         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3272         spin_unlock(&imp->imp_lock);
3273         LASSERT(imp->imp_last_replay_transno);
3274
3275         /* transaction number shouldn't be bigger than the latest replayed */
3276         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3277                 DEBUG_REQ(D_ERROR, req,
3278                           "Reported transno=%llu is bigger than replayed=%llu",
3279                           req->rq_transno,
3280                           lustre_msg_get_transno(req->rq_reqmsg));
3281                 GOTO(out, rc = -EINVAL);
3282         }
3283
3284         DEBUG_REQ(D_HA, req, "got reply");
3285
3286         /* let the callback do fixups, possibly including in the request */
3287         if (req->rq_replay_cb)
3288                 req->rq_replay_cb(req);
3289
3290         if (ptlrpc_client_replied(req) &&
3291             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3292                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3293                           lustre_msg_get_status(req->rq_repmsg),
3294                           aa->praa_old_status);
3295
3296                 /*
3297                  * Note: If the replay fails for MDT-MDT recovery, let's
3298                  * abort all of the following requests in the replay
3299                  * and sending list, because MDT-MDT update requests
3300                  * are dependent on each other, see LU-7039
3301                  */
3302                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3303                         struct ptlrpc_request *free_req;
3304                         struct ptlrpc_request *tmp;
3305
3306                         spin_lock(&imp->imp_lock);
3307                         list_for_each_entry_safe(free_req, tmp,
3308                                                  &imp->imp_replay_list,
3309                                                  rq_replay_list) {
3310                                 ptlrpc_free_request(free_req);
3311                         }
3312
3313                         list_for_each_entry_safe(free_req, tmp,
3314                                                  &imp->imp_committed_list,
3315                                                  rq_replay_list) {
3316                                 ptlrpc_free_request(free_req);
3317                         }
3318
3319                         list_for_each_entry_safe(free_req, tmp,
3320                                                  &imp->imp_delayed_list,
3321                                                  rq_list) {
3322                                 spin_lock(&free_req->rq_lock);
3323                                 free_req->rq_err = 1;
3324                                 free_req->rq_status = -EIO;
3325                                 ptlrpc_client_wake_req(free_req);
3326                                 spin_unlock(&free_req->rq_lock);
3327                         }
3328
3329                         list_for_each_entry_safe(free_req, tmp,
3330                                                  &imp->imp_sending_list,
3331                                                  rq_list) {
3332                                 spin_lock(&free_req->rq_lock);
3333                                 free_req->rq_err = 1;
3334                                 free_req->rq_status = -EIO;
3335                                 ptlrpc_client_wake_req(free_req);
3336                                 spin_unlock(&free_req->rq_lock);
3337                         }
3338                         spin_unlock(&imp->imp_lock);
3339                 }
3340         } else {
3341                 /* Put it back for re-replay. */
3342                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3343         }
3344
3345         /*
3346          * Errors during replay can set transno to 0, but
3347          * imp_last_replay_transno shouldn't be set to 0 anyway.
3348          */
3349         if (req->rq_transno == 0)
3350                 CERROR("Transno is 0 during replay!\n");
3351
3352         /* continue with recovery */
3353         rc = ptlrpc_import_recovery_state_machine(imp);
3354  out:
3355         req->rq_send_state = aa->praa_old_state;
3356
3357         if (rc != 0)
3358                 /* this replay failed, so restart recovery */
3359                 ptlrpc_connect_import(imp);
3360
3361         RETURN(rc);
3362 }
3363
3364 /**
3365  * Prepares and queues request for replay.
3366  * Adds it to ptlrpcd queue for actual sending.
3367  * Returns 0 on success.
3368  */
3369 int ptlrpc_replay_req(struct ptlrpc_request *req)
3370 {
3371         struct ptlrpc_replay_async_args *aa;
3372
3373         ENTRY;
3374
3375         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3376
3377         CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_REPLAY_PAUSE, cfs_fail_val);
3378
3379         aa = ptlrpc_req_async_args(aa, req);
3380         memset(aa, 0, sizeof(*aa));
3381
3382         /* Prepare request to be resent with ptlrpcd */
3383         aa->praa_old_state = req->rq_send_state;
3384         req->rq_send_state = LUSTRE_IMP_REPLAY;
3385         req->rq_phase = RQ_PHASE_NEW;
3386         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3387         if (req->rq_repmsg)
3388                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3389         req->rq_status = 0;
3390         req->rq_interpret_reply = ptlrpc_replay_interpret;
3391         /* Readjust the timeout for current conditions */
3392         ptlrpc_at_set_req_timeout(req);
3393
3394         /* Tell the server our net_latency, used to calculate how long to wait for the reply. */
3395         lustre_msg_set_service_timeout(req->rq_reqmsg,
3396                                        ptlrpc_at_get_net_latency(req));
3397         DEBUG_REQ(D_HA, req, "REPLAY");
3398
3399         atomic_inc(&req->rq_import->imp_replay_inflight);
3400         spin_lock(&req->rq_lock);
3401         req->rq_early_free_repbuf = 0;
3402         spin_unlock(&req->rq_lock);
3403         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3404
3405         ptlrpcd_add_req(req);
3406         RETURN(0);
3407 }
3408
3409 /**
3410  * Aborts all in-flight requests on the \a imp sending and delayed lists.
3411  */
3412 void ptlrpc_abort_inflight(struct obd_import *imp)
3413 {
3414         struct ptlrpc_request *req;
3415         ENTRY;
3416
3417         /*
3418          * Make sure that no new requests get processed for this import.
3419          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3420          * this flag and then putting requests on sending_list or delayed_list.
3421          */
3422         assert_spin_locked(&imp->imp_lock);
3423
3424         /*
3425          * XXX locking?  Maybe we should remove each request with the list
3426          * locked?  Also, how do we know if the requests on the list are
3427          * being freed at this time?
3428          */
3429         list_for_each_entry(req, &imp->imp_sending_list, rq_list) {
3430                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3431
3432                 spin_lock(&req->rq_lock);
3433                 if (req->rq_import_generation < imp->imp_generation) {
3434                         req->rq_err = 1;
3435                         req->rq_status = -EIO;
3436                         ptlrpc_client_wake_req(req);
3437                 }
3438                 spin_unlock(&req->rq_lock);
3439         }
3440
3441         list_for_each_entry(req, &imp->imp_delayed_list, rq_list) {
3442                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3443
3444                 spin_lock(&req->rq_lock);
3445                 if (req->rq_import_generation < imp->imp_generation) {
3446                         req->rq_err = 1;
3447                         req->rq_status = -EIO;
3448                         ptlrpc_client_wake_req(req);
3449                 }
3450                 spin_unlock(&req->rq_lock);
3451         }
3452
3453         /*
3454          * Last chance to free reqs left on the replay list, but we
3455          * will still leak reqs that haven't committed.
3456          */
3457         if (imp->imp_replayable)
3458                 ptlrpc_free_committed(imp);
3459
3460         EXIT;
3461 }
3462
3463 /**
3464  * Abort all uncompleted requests in request set \a set
3465  */
3466 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3467 {
3468         struct ptlrpc_request *req;
3469
3470         LASSERT(set != NULL);
3471
3472         list_for_each_entry(req, &set->set_requests, rq_set_chain) {
3473                 spin_lock(&req->rq_lock);
3474                 if (req->rq_phase != RQ_PHASE_RPC) {
3475                         spin_unlock(&req->rq_lock);
3476                         continue;
3477                 }
3478
3479                 req->rq_err = 1;
3480                 req->rq_status = -EINTR;
3481                 ptlrpc_client_wake_req(req);
3482                 spin_unlock(&req->rq_lock);
3483         }
3484 }
3485
3486 /**
3487  * Initialize the XID for the node.  This is common among all requests on
3488  * this node, and only requires the property that it is monotonically
3489  * increasing.  It does not need to be sequential.  Since this is also used
3490  * as the RDMA match bits, it is important that a single client NOT have
3491  * the same match bits for two different in-flight requests, hence we do
3492  * NOT want to have an XID per target or similar.
3493  *
3494  * To avoid an unlikely collision between match bits after a client reboot
3495  * (which would deliver old data into the wrong RDMA buffer) initialize
3496  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3497  * If the time is clearly incorrect, we instead use a 62-bit random number.
3498  * In the worst case the random number will overflow 1M RPCs per second in
3499  * 9133 years, or permutations thereof.
3500  */
3501 #define YEAR_2004 (1ULL << 30)
3502 void ptlrpc_init_xid(void)
3503 {
3504         time64_t now = ktime_get_real_seconds();
3505         u64 xid;
3506
3507         if (now < YEAR_2004) {
3508                 get_random_bytes(&xid, sizeof(xid));
3509                 xid >>= 2;
3510                 xid |= (1ULL << 61);
3511         } else {
3512                 xid = (u64)now << 20;
3513         }
3514
3515         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3516         BUILD_BUG_ON((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) !=
3517                      0);
3518         xid &= PTLRPC_BULK_OPS_MASK;
3519         atomic64_set(&ptlrpc_last_xid, xid);
3520 }
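/*
 * Example of the time-based initialization above (illustrative numbers):
 * with ktime_get_real_seconds() returning roughly 1.7 billion, the
 * initial xid is about 1.7e9 << 20, i.e. roughly 1.8e15.  The shift by
 * 20 bits gives about 2^20 (~1M) xid values per second of real time, so
 * the time-based starting point stays ahead of previously used xids as
 * long as fewer than ~1M xid values are consumed per second.  Masking
 * with PTLRPC_BULK_OPS_MASK then rounds the value down to a
 * PTLRPC_BULK_OPS_COUNT boundary so multi-bulk match bits stay aligned.
 */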
3521
3522 /**
3523  * Increases the xid and returns the resulting new value to the caller.
3524  *
3525  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
3526  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
3527  * itself uses the last bulk xid needed, so the server can determine
3528  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3529  * xid must align to a power-of-two value.
3530  *
3531  * This is assumed to be true due to the initial ptlrpc_last_xid
3532  * value also being initialized to a power-of-two value. LU-1431
3533  */
3534 __u64 ptlrpc_next_xid(void)
3535 {
3536         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3537 }
3538
3539 /**
3540  * If the request has a newly allocated XID (new request or EINPROGRESS
3541  * resend), use this XID as the bulk matchbits; otherwise allocate new
3542  * matchbits for the request to ensure the previous bulk fails, avoiding
3543  * problems with lost replies and several transfers landing in the same
3544  * buffer from different sending attempts, and preventing a previous
3545  * reply from landing in a different sending attempt.
3546  */
3547 void ptlrpc_set_mbits(struct ptlrpc_request *req)
3548 {
3549         int md_count = req->rq_bulk ? req->rq_bulk->bd_md_count : 1;
3550
3551         /*
3552          * Generate new matchbits for all resend requests, including
3553          * resend replay.
3554          */
3555         if (req->rq_resend) {
3556                 __u64 old_mbits = req->rq_mbits;
3557
3558                 /*
3559                  * A first-time resend on -EINPROGRESS will generate a new xid,
3560                  * so we could actually use the rq_xid as rq_mbits in that case;
3561                  * however, it's a bit hard to distinguish such a resend from a
3562                  * 'resend of the -EINPROGRESS resend'. To keep it simple,
3563                  * we opt to generate new mbits for all resend cases.
3564                  */
3565                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3566                                  BULK_MBITS)) {
3567                         req->rq_mbits = ptlrpc_next_xid();
3568                 } else {
3569                         /*
3570                          * Old version transfers rq_xid to peer as
3571                          * matchbits.
3572                          */
3573                         spin_lock(&req->rq_import->imp_lock);
3574                         list_del_init(&req->rq_unreplied_list);
3575                         ptlrpc_assign_next_xid_nolock(req);
3576                         spin_unlock(&req->rq_import->imp_lock);
3577                         req->rq_mbits = req->rq_xid;
3578                 }
3579                 CDEBUG(D_HA, "resend with new mbits old x%llu new x%llu\n",
3580                        old_mbits, req->rq_mbits);
3581         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3582                 /* Request being sent first time, use xid as matchbits. */
3583                 if (OCD_HAS_FLAG(&req->rq_import->imp_connect_data,
3584                                  BULK_MBITS) || req->rq_mbits == 0)
3585                 {
3586                         req->rq_mbits = req->rq_xid;
3587                 } else {
3588                         req->rq_mbits -= md_count - 1;
3589                 }
3590         } else {
3591                 /*
3592                  * Replay request, xid and matchbits have already been
3593                  * correctly assigned.
3594                  */
3595                 return;
3596         }
3597
3598         /*
3599          * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
3600          * that the server can infer the number of bulks that were prepared,
3601          * see LU-1431.
3602          */
3603         req->rq_mbits += md_count - 1;
3604
3605         /*
3606          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3607          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3608          *
3609          * It's ok to directly set the rq_xid here, since this xid bump
3610          * won't affect the request position in unreplied list.
3611          */
3612         if (!OCD_HAS_FLAG(&req->rq_import->imp_connect_data, BULK_MBITS))
3613                 req->rq_xid = req->rq_mbits;
3614 }
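/*
 * Worked example for ptlrpc_set_mbits() above (illustrative values): a
 * first-time BRW with rq_xid = X and rq_bulk->bd_md_count = 4 against a
 * server with OBD_CONNECT_BULK_MBITS gets rq_mbits = X and then
 * rq_mbits += 3, so the bulk MDs use match bits X..X+3 and the RPC itself
 * carries the last one (X+3).  Without BULK_MBITS support, rq_xid is then
 * also set to the final match bits to indicate the last bulk to the old
 * server (LU-6808).
 */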
3615
3616 /**
3617  * Get a glimpse at what the next xid value might be.
3618  * Returns the possible next xid.
3619  */
3620 __u64 ptlrpc_sample_next_xid(void)
3621 {
3622         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3623 }
3624 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3625
3626 /**
3627  * Functions for operating ptlrpc workers.
3628  *
3629  * A ptlrpc work is a function which runs inside ptlrpc context.
3630  * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
3631  *
3632  * 1. After a work is created, it can be used many times, that is:
3633  *         handler = ptlrpcd_alloc_work();
3634  *         ptlrpcd_queue_work();
3635  *
3636  *    queue it again when necessary:
3637  *         ptlrpcd_queue_work();
3638  *         ptlrpcd_destroy_work();
3639  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3640  *    but it will only be queued once at any time. Also, as its name implies,
3641  *    there may be a delay before it is actually run by a ptlrpcd thread.
3642  */
3643 struct ptlrpc_work_async_args {
3644         int (*cb)(const struct lu_env *, void *);
3645         void *cbdata;
3646 };
3647
3648 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3649 {
3650         /* re-initialize the req */
3651         req->rq_timeout         = obd_timeout;
3652         req->rq_sent            = ktime_get_real_seconds();
3653         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3654         req->rq_phase           = RQ_PHASE_INTERPRET;
3655         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3656         req->rq_xid             = ptlrpc_next_xid();
3657         req->rq_import_generation = req->rq_import->imp_generation;
3658
3659         ptlrpcd_add_req(req);
3660 }
3661
3662 static int work_interpreter(const struct lu_env *env,
3663                             struct ptlrpc_request *req, void *args, int rc)
3664 {
3665         struct ptlrpc_work_async_args *arg = args;
3666
3667         LASSERT(ptlrpcd_check_work(req));
3668         LASSERT(arg->cb != NULL);
3669
3670         rc = arg->cb(env, arg->cbdata);
3671
3672         list_del_init(&req->rq_set_chain);
3673         req->rq_set = NULL;
3674
3675         if (atomic_dec_return(&req->rq_refcount) > 1) {
3676                 atomic_set(&req->rq_refcount, 2);
3677                 ptlrpcd_add_work_req(req);
3678         }
3679         return rc;
3680 }
3681
3682 static int worker_format;
3683
3684 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3685 {
3686         return req->rq_pill.rc_fmt == (void *)&worker_format;
3687 }
3688
3689 /**
3690  * Create a work for ptlrpc.
3691  */
3692 void *ptlrpcd_alloc_work(struct obd_import *imp,
3693                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3694 {
3695         struct ptlrpc_request *req = NULL;
3696         struct ptlrpc_work_async_args *args;
3697
3698         ENTRY;
3699         might_sleep();
3700
3701         if (!cb)
3702                 RETURN(ERR_PTR(-EINVAL));
3703
3704         /* copy some code from deprecated fakereq. */
3705         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3706         if (!req) {
3707                 CERROR("ptlrpc: run out of memory!\n");
3708                 RETURN(ERR_PTR(-ENOMEM));
3709         }
3710
3711         ptlrpc_cli_req_init(req);
3712
3713         req->rq_send_state = LUSTRE_IMP_FULL;
3714         req->rq_type = PTL_RPC_MSG_REQUEST;
3715         req->rq_import = class_import_get(imp);
3716         req->rq_interpret_reply = work_interpreter;
3717         /* don't want reply */
3718         req->rq_no_delay = req->rq_no_resend = 1;
3719         req->rq_pill.rc_fmt = (void *)&worker_format;
3720
3721         args = ptlrpc_req_async_args(args, req);
3722         args->cb     = cb;
3723         args->cbdata = cbdata;
3724
3725         RETURN(req);
3726 }
3727 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3728
3729 void ptlrpcd_destroy_work(void *handler)
3730 {
3731         struct ptlrpc_request *req = handler;
3732
3733         if (req)
3734                 ptlrpc_req_finished(req);
3735 }
3736 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3737
3738 int ptlrpcd_queue_work(void *handler)
3739 {
3740         struct ptlrpc_request *req = handler;
3741
3742         /*
3743          * Check if the req is already being queued.
3744          *
3745          * Here comes a trick: ptlrpc lacks a reliable way of checking
3746          * whether a req is already being processed, so the refcount of the
3747          * req is used for this purpose. This is okay because the caller
3748          * should treat this req as opaque data. - Jinshan
3749          */
3750         LASSERT(atomic_read(&req->rq_refcount) > 0);
3751         if (atomic_inc_return(&req->rq_refcount) == 2)
3752                 ptlrpcd_add_work_req(req);
3753         return 0;
3754 }
3755 EXPORT_SYMBOL(ptlrpcd_queue_work);
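/*
 * Usage sketch for the ptlrpc work interface above (my_flush_cb and its
 * cbdata are hypothetical; the callback must not sleep since it runs in
 * ptlrpcd context):
 *
 *         static int my_flush_cb(const struct lu_env *env, void *data)
 *         {
 *                 (do lightweight, non-blocking work here)
 *                 return 0;
 *         }
 *
 *         handler = ptlrpcd_alloc_work(imp, my_flush_cb, cbdata);
 *         if (IS_ERR(handler))
 *                 return PTR_ERR(handler);
 *         ptlrpcd_queue_work(handler);       (re-queue as often as needed)
 *         ...
 *         ptlrpcd_destroy_work(handler);     (releases the work request)
 */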