LU-12523 ptlrpc: Stop sending ptlrpc_body_v2
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 /** Implementation of client-side PortalRPC interfaces */
34
35 #define DEBUG_SUBSYSTEM S_RPC
36
37 #include <linux/delay.h>
38 #include <linux/random.h>
39
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_lib.h>
43 #include <lustre_ha.h>
44 #include <lustre_import.h>
45 #include <lustre_req_layout.h>
46
47 #include "ptlrpc_internal.h"
48
49 static void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
50                                       struct page *page, int pageoffset,
51                                       int len)
52 {
53         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
54 }
55
56 static void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
57                                         struct page *page, int pageoffset,
58                                         int len)
59 {
60         __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
61 }
62
63 static void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
64 {
65         int i;
66
67         for (i = 0; i < desc->bd_iov_count ; i++)
68                 put_page(BD_GET_KIOV(desc, i).kiov_page);
69 }
70
71 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
72         .add_kiov_frag  = ptlrpc_prep_bulk_page_pin,
73         .release_frags  = ptlrpc_release_bulk_page_pin,
74 };
75 EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
76
77 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
78         .add_kiov_frag  = ptlrpc_prep_bulk_page_nopin,
79         .release_frags  = ptlrpc_release_bulk_noop,
80 };
81 EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
82
83 const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kvec_ops = {
84         .add_iov_frag = ptlrpc_prep_bulk_frag,
85 };
86 EXPORT_SYMBOL(ptlrpc_bulk_kvec_ops);
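/*
 * Annotation (not part of the original file): of the three frag-ops tables
 * above, the "pin" ops take a page reference for every fragment added and
 * drop it again in release_frags, so they suit pages that might otherwise go
 * away while the bulk transfer is in flight; the "nopin" ops skip the
 * reference because the caller keeps the pages alive itself; and the kvec ops
 * are for buffers described by struct kvec rather than by pages.
 */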
87
88 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
89 static int ptlrpcd_check_work(struct ptlrpc_request *req);
90 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
91
92 /**
93  * Initialize the passed-in client structure \a cl.
94  */
95 void ptlrpc_init_client(int req_portal, int rep_portal, const char *name,
96                         struct ptlrpc_client *cl)
97 {
98         cl->cli_request_portal = req_portal;
99         cl->cli_reply_portal   = rep_portal;
100         cl->cli_name           = name;
101 }
102 EXPORT_SYMBOL(ptlrpc_init_client);
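/*
 * Illustrative sketch (not part of the original file): how a client OBD type
 * might fill in its struct ptlrpc_client during setup.  The portal constants
 * are the standard OST ones; the name and variable are made up for the
 * example.
 *
 *	static struct ptlrpc_client example_osc_client;
 *
 *	ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
 *			   "example-osc", &example_osc_client);
 *
 * The import's imp_client is then pointed at this structure, which is where
 * ptlrpc_request_bufs_pack() below reads the request and reply portals from.
 */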
103
104 /**
105  * Return PortalRPC connection for remote uuid \a uuid
106  */
107 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
108                                                     lnet_nid_t nid4refnet)
109 {
110         struct ptlrpc_connection *c;
111         lnet_nid_t self;
112         struct lnet_process_id peer;
113         int err;
114
115         /*
116          * ptlrpc_uuid_to_peer() initializes its 2nd parameter
117          * before accessing its values.
118          */
119         /* coverity[uninit_use_in_call] */
120         peer.nid = nid4refnet;
121         err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
122         if (err != 0) {
123                 CNETERR("cannot find peer %s!\n", uuid->uuid);
124                 return NULL;
125         }
126
127         c = ptlrpc_connection_get(peer, self, uuid);
128         if (c) {
129                 memcpy(c->c_remote_uuid.uuid,
130                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
131         }
132
133         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
134
135         return c;
136 }
137
138 /**
139  * Allocate and initialize new bulk descriptor on the sender.
140  * Returns pointer to the descriptor or NULL on error.
141  */
142 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
143                                          unsigned int max_brw,
144                                          enum ptlrpc_bulk_op_type type,
145                                          unsigned int portal,
146                                          const struct ptlrpc_bulk_frag_ops *ops)
147 {
148         struct ptlrpc_bulk_desc *desc;
149         int i;
150
151         /* ensure that only one of KIOV or IOVEC is set but not both */
152         LASSERT((ptlrpc_is_bulk_desc_kiov(type) &&
153                  ops->add_kiov_frag != NULL) ||
154                 (ptlrpc_is_bulk_desc_kvec(type) &&
155                  ops->add_iov_frag != NULL));
156
157         OBD_ALLOC_PTR(desc);
158         if (!desc)
159                 return NULL;
160         if (type & PTLRPC_BULK_BUF_KIOV) {
161                 OBD_ALLOC_LARGE(GET_KIOV(desc),
162                                 nfrags * sizeof(*GET_KIOV(desc)));
163                 if (!GET_KIOV(desc))
164                         goto out;
165         } else {
166                 OBD_ALLOC_LARGE(GET_KVEC(desc),
167                                 nfrags * sizeof(*GET_KVEC(desc)));
168                 if (!GET_KVEC(desc))
169                         goto out;
170         }
171
172         spin_lock_init(&desc->bd_lock);
173         init_waitqueue_head(&desc->bd_waitq);
174         desc->bd_max_iov = nfrags;
175         desc->bd_iov_count = 0;
176         desc->bd_portal = portal;
177         desc->bd_type = type;
178         desc->bd_md_count = 0;
179         desc->bd_frag_ops = ops;
180         LASSERT(max_brw > 0);
181         desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
182         /*
183          * PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
184          * node. Negotiated ocd_brw_size will always be <= this number.
185          */
186         for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
187                 LNetInvalidateMDHandle(&desc->bd_mds[i]);
188
189         return desc;
190 out:
191         OBD_FREE_PTR(desc);
192         return NULL;
193 }
194
195 /**
196  * Prepare bulk descriptor for specified outgoing request \a req that
197  * can fit \a nfrags pages. \a type is the bulk type. \a portal is where
198  * the bulk is to be sent. Used on the client side.
199  * Returns pointer to the newly allocated and initialized bulk descriptor
200  * or NULL on error.
201  */
202 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
203                                               unsigned int nfrags,
204                                               unsigned int max_brw,
205                                               unsigned int type,
206                                               unsigned int portal,
207                                               const struct ptlrpc_bulk_frag_ops
208                                                 *ops)
209 {
210         struct obd_import *imp = req->rq_import;
211         struct ptlrpc_bulk_desc *desc;
212
213         ENTRY;
214         LASSERT(ptlrpc_is_bulk_op_passive(type));
215
216         desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
217         if (!desc)
218                 RETURN(NULL);
219
220         desc->bd_import_generation = req->rq_import_generation;
221         desc->bd_import = class_import_get(imp);
222         desc->bd_req = req;
223
224         desc->bd_cbid.cbid_fn  = client_bulk_callback;
225         desc->bd_cbid.cbid_arg = desc;
226
227         /* This makes req own desc, and free it when req itself is freed */
228         req->rq_bulk = desc;
229
230         return desc;
231 }
232 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
233
234 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
235                              struct page *page, int pageoffset, int len,
236                              int pin)
237 {
238         lnet_kiov_t *kiov;
239
240         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
241         LASSERT(page != NULL);
242         LASSERT(pageoffset >= 0);
243         LASSERT(len > 0);
244         LASSERT(pageoffset + len <= PAGE_SIZE);
245         LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
246
247         kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
248
249         desc->bd_nob += len;
250
251         if (pin)
252                 get_page(page);
253
254         kiov->kiov_page = page;
255         kiov->kiov_offset = pageoffset;
256         kiov->kiov_len = len;
257
258         desc->bd_iov_count++;
259 }
260 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
261
262 int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
263                           void *frag, int len)
264 {
265         struct kvec *iovec;
266
267         ENTRY;
268
269         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
270         LASSERT(frag != NULL);
271         LASSERT(len > 0);
272         LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
273
274         iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
275
276         desc->bd_nob += len;
277
278         iovec->iov_base = frag;
279         iovec->iov_len = len;
280
281         desc->bd_iov_count++;
282
283         RETURN(desc->bd_nob);
284 }
285 EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
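/*
 * Illustrative sketch (not part of the original file): the usual client-side
 * flow for a page-based (KIOV) bulk write, where the server GETs the data
 * from the client.  req, npages and pages[] are placeholders; a real caller
 * such as the OSC brw path derives them from the I/O being issued.
 *
 *	struct ptlrpc_bulk_desc *desc;
 *	int i;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
 *				    PTLRPC_BULK_GET_SOURCE | PTLRPC_BULK_BUF_KIOV,
 *				    OST_BULK_PORTAL,
 *				    &ptlrpc_bulk_kiov_pin_ops);
 *	if (desc == NULL)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
 *
 * Because the pin ops were chosen, each page gets an extra reference that is
 * dropped by release_frags when ptlrpc_free_bulk() tears the descriptor down.
 */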
286
287 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
288 {
289         ENTRY;
290
291         LASSERT(desc != NULL);
292         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
293         LASSERT(desc->bd_md_count == 0);         /* network hands off */
294         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
295         LASSERT(desc->bd_frag_ops != NULL);
296
297         if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
298                 sptlrpc_enc_pool_put_pages(desc);
299
300         if (desc->bd_export)
301                 class_export_put(desc->bd_export);
302         else
303                 class_import_put(desc->bd_import);
304
305         if (desc->bd_frag_ops->release_frags != NULL)
306                 desc->bd_frag_ops->release_frags(desc);
307
308         if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
309                 OBD_FREE_LARGE(GET_KIOV(desc),
310                                desc->bd_max_iov * sizeof(*GET_KIOV(desc)));
311         else
312                 OBD_FREE_LARGE(GET_KVEC(desc),
313                                desc->bd_max_iov * sizeof(*GET_KVEC(desc)));
314         OBD_FREE_PTR(desc);
315         EXIT;
316 }
317 EXPORT_SYMBOL(ptlrpc_free_bulk);
318
319 /**
320  * Set the server time limit for this req, i.e. how long we are willing to
321  * wait for the reply before timing out this request.
322  */
323 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
324 {
325         __u32 serv_est;
326         int idx;
327         struct imp_at *at;
328
329         LASSERT(req->rq_import);
330
331         if (AT_OFF) {
332                 /* non-AT settings */
333                 /**
334                  * \a imp_server_timeout means this is a reverse import and
335                  * we send (currently only) ASTs to the client, so we cannot
336                  * afford to wait too long for the reply; otherwise the other
337                  * client (on whose behalf we are sending this request) would
338                  * time out waiting for us.
339                  */
340                 req->rq_timeout = req->rq_import->imp_server_timeout ?
341                                   obd_timeout / 2 : obd_timeout;
342         } else {
343                 at = &req->rq_import->imp_at;
344                 idx = import_at_get_index(req->rq_import,
345                                           req->rq_request_portal);
346                 serv_est = at_get(&at->iat_service_estimate[idx]);
347                 req->rq_timeout = at_est2timeout(serv_est);
348         }
349         /*
350          * We could get even fancier here, using history to predict increased
351          * loading...
352          */
353
354         /*
355          * Let the server know what this RPC timeout is by putting it in the
356          * reqmsg
357          */
358         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
359 }
360 EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
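/*
 * Worked example (annotation, not part of the original file): with adaptive
 * timeouts enabled, if the tracked service estimate for the target portal is
 * 20s, then following the at_est2timeout() behaviour described later in this
 * file (service time x 125% + 5s) the request gets rq_timeout of about
 * 20 * 1.25 + 5 = 30s, and the same value is written into the request message
 * so the server knows how long the client is prepared to wait.
 */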
361
362 /* Adjust max service estimate based on server value */
363 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
364                                   unsigned int serv_est)
365 {
366         int idx;
367         unsigned int oldse;
368         struct imp_at *at;
369
370         LASSERT(req->rq_import);
371         at = &req->rq_import->imp_at;
372
373         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
374         /*
375          * max service estimates are tracked on the server side,
376          * so just keep minimal history here
377          */
378         oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
379         if (oldse != 0)
380                 CDEBUG(D_ADAPTTO,
381                        "The RPC service estimate for %s ptl %d has changed from %d to %d\n",
382                        req->rq_import->imp_obd->obd_name,
383                        req->rq_request_portal,
384                        oldse, at_get(&at->iat_service_estimate[idx]));
385 }
386
387 /* Expected network latency per remote node (secs) */
388 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
389 {
390         return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
391 }
392
393 /* Adjust expected network latency */
394 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
395                                unsigned int service_time)
396 {
397         unsigned int nl, oldnl;
398         struct imp_at *at;
399         time64_t now = ktime_get_real_seconds();
400
401         LASSERT(req->rq_import);
402
403         if (service_time > now - req->rq_sent + 3) {
404                 /*
405                  * b=16408. However, this can also happen if the early reply
406                  * is lost and the client RPC expires and is resent: the early
407                  * reply or the reply to the original RPC can still fit in the
408                  * reply buffer of the resent RPC, so the client measures time
409                  * from the resend while the server reports the service time of
410                  * the original RPC.
411                  */
412                 CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
413                        D_ADAPTTO : D_WARNING,
414                        "Reported service time %u > total measured time %lld\n",
415                        service_time, now - req->rq_sent);
416                 return;
417         }
418
419         /* Network latency is total time less server processing time */
420         nl = max_t(int, now - req->rq_sent -
421                         service_time, 0) + 1; /* st rounding */
422         at = &req->rq_import->imp_at;
423
424         oldnl = at_measured(&at->iat_net_latency, nl);
425         if (oldnl != 0)
426                 CDEBUG(D_ADAPTTO,
427                        "The network latency for %s (nid %s) has changed from %d to %d\n",
428                        req->rq_import->imp_obd->obd_name,
429                        obd_uuid2str(&req->rq_import->imp_connection->c_remote_uuid),
430                        oldnl, at_get(&at->iat_net_latency));
431 }
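/*
 * Worked example (annotation, not part of the original file): if the reply is
 * processed 8s after rq_sent and the server reports 5s of service time, the
 * sample fed to at_measured() is max(8 - 5, 0) + 1 = 4s; the +1 compensates
 * for the reported service time being rounded to whole seconds.
 */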
432
433 static int unpack_reply(struct ptlrpc_request *req)
434 {
435         int rc;
436
437         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
438                 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
439                 if (rc) {
440                         DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
441                         return -EPROTO;
442                 }
443         }
444
445         rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
446         if (rc) {
447                 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
448                 return -EPROTO;
449         }
450         return 0;
451 }
452
453 /**
454  * Handle an early reply message, called with the rq_lock held.
455  * If anything goes wrong just ignore it - same as if it never happened
456  */
457 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
458 __must_hold(&req->rq_lock)
459 {
460         struct ptlrpc_request *early_req;
461         time64_t olddl;
462         int rc;
463
464         ENTRY;
465         req->rq_early = 0;
466         spin_unlock(&req->rq_lock);
467
468         rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
469         if (rc) {
470                 spin_lock(&req->rq_lock);
471                 RETURN(rc);
472         }
473
474         rc = unpack_reply(early_req);
475         if (rc != 0) {
476                 sptlrpc_cli_finish_early_reply(early_req);
477                 spin_lock(&req->rq_lock);
478                 RETURN(rc);
479         }
480
481         /*
482          * Use the new timeout value only to adjust the local value for this
483          * request; don't include it in at_history. It is not yet clear why the
484          * service time increased and whether it should be counted or skipped,
485          * e.g. it could be a recovery case or some error on the server; the
486          * real reply will add all the new data if it is worth adding.
487          */
488         req->rq_timeout = lustre_msg_get_timeout(early_req->rq_repmsg);
489         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
490
491         /* Network latency can be adjusted, it is pure network delays */
492         ptlrpc_at_adj_net_latency(req,
493                                   lustre_msg_get_service_time(early_req->rq_repmsg));
494
495         sptlrpc_cli_finish_early_reply(early_req);
496
497         spin_lock(&req->rq_lock);
498         olddl = req->rq_deadline;
499         /*
500          * server assumes it now has rq_timeout from when the request
501          * arrived, so the client should give it at least that long.
502          * since we don't know the arrival time we'll use the original
503          * sent time
504          */
505         req->rq_deadline = req->rq_sent + req->rq_timeout +
506                            ptlrpc_at_get_net_latency(req);
507
508         DEBUG_REQ(D_ADAPTTO, req,
509                   "Early reply #%d, new deadline in %llds (%llds)",
510                   req->rq_early_count,
511                   req->rq_deadline - ktime_get_real_seconds(),
512                   req->rq_deadline - olddl);
513
514         RETURN(rc);
515 }
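/*
 * Worked example (annotation, not part of the original file): suppose the
 * request was sent at time T with rq_timeout = 30s and a measured net latency
 * of 2s.  If an early reply raises the server-granted timeout to 60s, the new
 * deadline becomes rq_sent + 60 + 2 = T + 62, counted from the original send
 * time because the client does not know when the request actually arrived at
 * the server.
 */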
516
517 static struct kmem_cache *request_cache;
518
519 int ptlrpc_request_cache_init(void)
520 {
521         request_cache = kmem_cache_create("ptlrpc_cache",
522                                           sizeof(struct ptlrpc_request),
523                                           0, SLAB_HWCACHE_ALIGN, NULL);
524         return request_cache ? 0 : -ENOMEM;
525 }
526
527 void ptlrpc_request_cache_fini(void)
528 {
529         kmem_cache_destroy(request_cache);
530 }
531
532 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
533 {
534         struct ptlrpc_request *req;
535
536         OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
537         return req;
538 }
539
540 void ptlrpc_request_cache_free(struct ptlrpc_request *req)
541 {
542         OBD_SLAB_FREE_PTR(req, request_cache);
543 }
544
545 /**
546  * Wind down request pool \a pool.
547  * Frees all requests from the pool too
548  */
549 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
550 {
551         struct list_head *l, *tmp;
552         struct ptlrpc_request *req;
553
554         LASSERT(pool != NULL);
555
556         spin_lock(&pool->prp_lock);
557         list_for_each_safe(l, tmp, &pool->prp_req_list) {
558                 req = list_entry(l, struct ptlrpc_request, rq_list);
559                 list_del(&req->rq_list);
560                 LASSERT(req->rq_reqbuf);
561                 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
562                 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
563                 ptlrpc_request_cache_free(req);
564         }
565         spin_unlock(&pool->prp_lock);
566         OBD_FREE(pool, sizeof(*pool));
567 }
568 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
569
570 /**
571  * Allocates, initializes and adds \a num_rq requests to the pool \a pool
572  */
573 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
574 {
575         int i;
576         int size = 1;
577
578         while (size < pool->prp_rq_size)
579                 size <<= 1;
580
581         LASSERTF(list_empty(&pool->prp_req_list) ||
582                  size == pool->prp_rq_size,
583                  "Trying to change pool size with nonempty pool from %d to %d bytes\n",
584                  pool->prp_rq_size, size);
585
586         pool->prp_rq_size = size;
587         for (i = 0; i < num_rq; i++) {
588                 struct ptlrpc_request *req;
589                 struct lustre_msg *msg;
590
591                 req = ptlrpc_request_cache_alloc(GFP_NOFS);
592                 if (!req)
593                         return i;
594                 OBD_ALLOC_LARGE(msg, size);
595                 if (!msg) {
596                         ptlrpc_request_cache_free(req);
597                         return i;
598                 }
599                 req->rq_reqbuf = msg;
600                 req->rq_reqbuf_len = size;
601                 req->rq_pool = pool;
602                 spin_lock(&pool->prp_lock);
603                 list_add_tail(&req->rq_list, &pool->prp_req_list);
604                 spin_unlock(&pool->prp_lock);
605         }
606         return num_rq;
607 }
608 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
609
610 /**
611  * Create and initialize new request pool with given attributes:
612  * \a num_rq - initial number of requests to create for the pool
613  * \a msgsize - maximum message size possible for requests in this pool
614  * \a populate_pool - function to be called when more requests need to be added
615  *                    to the pool
616  * Returns pointer to newly created pool or NULL on error.
617  */
618 struct ptlrpc_request_pool *
619 ptlrpc_init_rq_pool(int num_rq, int msgsize,
620                     int (*populate_pool)(struct ptlrpc_request_pool *, int))
621 {
622         struct ptlrpc_request_pool *pool;
623
624         OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
625         if (!pool)
626                 return NULL;
627
628         /*
629          * Request the next power of two for the allocation, because internally
630          * the kernel would do exactly this.
631          */
632         spin_lock_init(&pool->prp_lock);
633         INIT_LIST_HEAD(&pool->prp_req_list);
634         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
635         pool->prp_populate = populate_pool;
636
637         populate_pool(pool, num_rq);
638
639         return pool;
640 }
641 EXPORT_SYMBOL(ptlrpc_init_rq_pool);
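/*
 * Illustrative sketch (not part of the original file): creating an emergency
 * request pool the way an I/O client might, so that writeout RPCs can still
 * be built under memory pressure.  The request count is a placeholder;
 * OST_IO_MAXREQSIZE is the usual message size for OST I/O requests.
 *
 *	struct ptlrpc_request_pool *pool;
 *
 *	pool = ptlrpc_init_rq_pool(4, OST_IO_MAXREQSIZE,
 *				   ptlrpc_add_rqs_to_pool);
 *	if (pool == NULL)
 *		return -ENOMEM;
 *	...
 *	ptlrpc_free_rq_pool(pool);
 *
 * ptlrpc_free_rq_pool() frees both the pool structure and every request still
 * sitting in it.
 */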
642
643 /**
644  * Fetches one request from pool \a pool
645  */
646 static struct ptlrpc_request *
647 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
648 {
649         struct ptlrpc_request *request;
650         struct lustre_msg *reqbuf;
651
652         if (!pool)
653                 return NULL;
654
655         spin_lock(&pool->prp_lock);
656
657         /*
658          * See if we have anything in the pool, and bail out if it is empty.
659          * In the writeout path, where this matters, this is safe to do because
660          * nothing is lost in this case, and when some in-flight requests
661          * complete, this code will be called again.
662          */
663         if (unlikely(list_empty(&pool->prp_req_list))) {
664                 spin_unlock(&pool->prp_lock);
665                 return NULL;
666         }
667
668         request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
669                              rq_list);
670         list_del_init(&request->rq_list);
671         spin_unlock(&pool->prp_lock);
672
673         LASSERT(request->rq_reqbuf);
674         LASSERT(request->rq_pool);
675
676         reqbuf = request->rq_reqbuf;
677         memset(request, 0, sizeof(*request));
678         request->rq_reqbuf = reqbuf;
679         request->rq_reqbuf_len = pool->prp_rq_size;
680         request->rq_pool = pool;
681
682         return request;
683 }
684
685 /**
686  * Returns freed \a request to pool.
687  */
688 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
689 {
690         struct ptlrpc_request_pool *pool = request->rq_pool;
691
692         spin_lock(&pool->prp_lock);
693         LASSERT(list_empty(&request->rq_list));
694         LASSERT(!request->rq_receiving_reply);
695         list_add_tail(&request->rq_list, &pool->prp_req_list);
696         spin_unlock(&pool->prp_lock);
697 }
698
699 void ptlrpc_add_unreplied(struct ptlrpc_request *req)
700 {
701         struct obd_import *imp = req->rq_import;
702         struct list_head *tmp;
703         struct ptlrpc_request *iter;
704
705         assert_spin_locked(&imp->imp_lock);
706         LASSERT(list_empty(&req->rq_unreplied_list));
707
708         /* unreplied list is sorted by xid in ascending order */
709         list_for_each_prev(tmp, &imp->imp_unreplied_list) {
710                 iter = list_entry(tmp, struct ptlrpc_request,
711                                   rq_unreplied_list);
712
713                 LASSERT(req->rq_xid != iter->rq_xid);
714                 if (req->rq_xid < iter->rq_xid)
715                         continue;
716                 list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
717                 return;
718         }
719         list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
720 }
721
722 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
723 {
724         req->rq_xid = ptlrpc_next_xid();
725         ptlrpc_add_unreplied(req);
726 }
727
728 static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
729 {
730         spin_lock(&req->rq_import->imp_lock);
731         ptlrpc_assign_next_xid_nolock(req);
732         spin_unlock(&req->rq_import->imp_lock);
733 }
734
735 static atomic64_t ptlrpc_last_xid;
736
737 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
738                              __u32 version, int opcode, char **bufs,
739                              struct ptlrpc_cli_ctx *ctx)
740 {
741         int count;
742         struct obd_import *imp;
743         __u32 *lengths;
744         int rc;
745
746         ENTRY;
747
748         count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
749         imp = request->rq_import;
750         lengths = request->rq_pill.rc_area[RCL_CLIENT];
751
752         if (ctx) {
753                 request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
754         } else {
755                 rc = sptlrpc_req_get_ctx(request);
756                 if (rc)
757                         GOTO(out_free, rc);
758         }
759         sptlrpc_req_set_flavor(request, opcode);
760
761         rc = lustre_pack_request(request, imp->imp_msg_magic, count,
762                                  lengths, bufs);
763         if (rc)
764                 GOTO(out_ctx, rc);
765
766         lustre_msg_add_version(request->rq_reqmsg, version);
767         request->rq_send_state = LUSTRE_IMP_FULL;
768         request->rq_type = PTL_RPC_MSG_REQUEST;
769
770         request->rq_req_cbid.cbid_fn  = request_out_callback;
771         request->rq_req_cbid.cbid_arg = request;
772
773         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
774         request->rq_reply_cbid.cbid_arg = request;
775
776         request->rq_reply_deadline = 0;
777         request->rq_bulk_deadline = 0;
778         request->rq_req_deadline = 0;
779         request->rq_phase = RQ_PHASE_NEW;
780         request->rq_next_phase = RQ_PHASE_UNDEFINED;
781
782         request->rq_request_portal = imp->imp_client->cli_request_portal;
783         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
784
785         ptlrpc_at_set_req_timeout(request);
786
787         lustre_msg_set_opc(request->rq_reqmsg, opcode);
788
789         /* Let's setup deadline for req/reply/bulk unlink for opcode. */
790         if (cfs_fail_val == opcode) {
791                 time64_t *fail_t = NULL, *fail2_t = NULL;
792
793                 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
794                         fail_t = &request->rq_bulk_deadline;
795                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
796                         fail_t = &request->rq_reply_deadline;
797                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
798                         fail_t = &request->rq_req_deadline;
799                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
800                         fail_t = &request->rq_reply_deadline;
801                         fail2_t = &request->rq_bulk_deadline;
802                 } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_ROUND_XID)) {
803                         time64_t now = ktime_get_real_seconds();
804                         u64 xid = ((u64)now >> 4) << 24;
805
806                         atomic64_set(&ptlrpc_last_xid, xid);
807                 }
808
809                 if (fail_t) {
810                         *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
811
812                         if (fail2_t)
813                                 *fail2_t = ktime_get_real_seconds() +
814                                            LONG_UNLINK;
815
816                         /*
817                          * The RPC is infected; let the test change the
818                          * fail_loc.
819                          */
820                         msleep(4 * MSEC_PER_SEC);
821                 }
822         }
823         ptlrpc_assign_next_xid(request);
824
825         RETURN(0);
826
827 out_ctx:
828         LASSERT(!request->rq_pool);
829         sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
830 out_free:
831         class_import_put(imp);
832
833         return rc;
834 }
835 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
836
837 /**
838  * Pack request buffers for network transfer, performing any necessary
839  * encryption steps.
840  */
841 int ptlrpc_request_pack(struct ptlrpc_request *request,
842                         __u32 version, int opcode)
843 {
844         return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
845 }
846 EXPORT_SYMBOL(ptlrpc_request_pack);
847
848 /**
849  * Helper function to allocate new request on import \a imp
850  * and possibly using existing request from pool \a pool if provided.
851  * Returns allocated request structure with import field filled or
852  * NULL on error.
853  */
854 static inline
855 struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
856                                               struct ptlrpc_request_pool *pool)
857 {
858         struct ptlrpc_request *request = NULL;
859
860         request = ptlrpc_request_cache_alloc(GFP_NOFS);
861
862         if (!request && pool)
863                 request = ptlrpc_prep_req_from_pool(pool);
864
865         if (request) {
866                 ptlrpc_cli_req_init(request);
867
868                 LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
869                 LASSERT(imp != LP_POISON);
870                 LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
871                          imp->imp_client);
872                 LASSERT(imp->imp_client != LP_POISON);
873
874                 request->rq_import = class_import_get(imp);
875         } else {
876                 CERROR("request allocation out of memory\n");
877         }
878
879         return request;
880 }
881
882 /**
883  * Helper function for creating a request.
884  * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
885  * buffer structures according to capsule template \a format.
886  * Returns allocated request structure pointer or NULL on error.
887  */
888 static struct ptlrpc_request *
889 ptlrpc_request_alloc_internal(struct obd_import *imp,
890                               struct ptlrpc_request_pool *pool,
891                               const struct req_format *format)
892 {
893         struct ptlrpc_request *request;
894         int connect = 0;
895
896         request = __ptlrpc_request_alloc(imp, pool);
897         if (!request)
898                 return NULL;
899
900         /*
901          * initiate connection if needed when the import has been
902          * referenced by the new request to avoid races with disconnect
903          */
904         if (unlikely(imp->imp_state == LUSTRE_IMP_IDLE)) {
905                 int rc;
906
907                 CDEBUG_LIMIT(imp->imp_idle_debug,
908                              "%s: reconnect after %llds idle\n",
909                              imp->imp_obd->obd_name, ktime_get_real_seconds() -
910                                                      imp->imp_last_reply_time);
911                 spin_lock(&imp->imp_lock);
912                 if (imp->imp_state == LUSTRE_IMP_IDLE) {
913                         imp->imp_generation++;
914                         imp->imp_initiated_at = imp->imp_generation;
915                         imp->imp_state =  LUSTRE_IMP_NEW;
916                         connect = 1;
917                 }
918                 spin_unlock(&imp->imp_lock);
919                 if (connect) {
920                         rc = ptlrpc_connect_import(imp);
921                         if (rc < 0) {
922                                 ptlrpc_request_free(request);
923                                 return NULL;
924                         }
925                         ptlrpc_pinger_add_import(imp);
926                 }
927         }
928
929         req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
930         req_capsule_set(&request->rq_pill, format);
931         return request;
932 }
933
934 /**
935  * Allocate new request structure for import \a imp and initialize its
936  * buffer structure according to capsule template \a format.
937  */
938 struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
939                                             const struct req_format *format)
940 {
941         return ptlrpc_request_alloc_internal(imp, NULL, format);
942 }
943 EXPORT_SYMBOL(ptlrpc_request_alloc);
944
945 /**
946  * Allocate new request structure for import \a imp from pool \a pool and
947  * initialize its buffer structure according to capsule template \a format.
948  */
949 struct ptlrpc_request *
950 ptlrpc_request_alloc_pool(struct obd_import *imp,
951                           struct ptlrpc_request_pool *pool,
952                           const struct req_format *format)
953 {
954         return ptlrpc_request_alloc_internal(imp, pool, format);
955 }
956 EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
957
958 /**
959  * For requests not from pool, free memory of the request structure.
960  * For requests obtained from a pool earlier, return request back to pool.
961  */
962 void ptlrpc_request_free(struct ptlrpc_request *request)
963 {
964         if (request->rq_pool)
965                 __ptlrpc_free_req_to_pool(request);
966         else
967                 ptlrpc_request_cache_free(request);
968 }
969 EXPORT_SYMBOL(ptlrpc_request_free);
970
971 /**
972  * Allocate a new request for operation \a opcode and immediately pack it for
973  * network transfer.
974  * Only used for simple requests like OBD_PING where the only important
975  * part of the request is operation itself.
976  * Returns allocated request or NULL on error.
977  */
978 struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
979                                                  const struct req_format *format,
980                                                  __u32 version, int opcode)
981 {
982         struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
983         int rc;
984
985         if (req) {
986                 rc = ptlrpc_request_pack(req, version, opcode);
987                 if (rc) {
988                         ptlrpc_request_free(req);
989                         req = NULL;
990                 }
991         }
992         return req;
993 }
994 EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
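/*
 * Illustrative sketch (not part of the original file): the ping path is the
 * classic user of this helper, since an OBD_PING request carries nothing
 * beyond the ptlrpc_body itself:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *
 * ptlrpc_request_set_replen() (defined elsewhere) sizes the reply buffer from
 * the capsule before the request is sent or queued on a set.
 */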
995
996 /**
997  * Allocate and initialize new request set structure on the current CPT.
998  * Returns a pointer to the newly allocated set structure or NULL on error.
999  */
1000 struct ptlrpc_request_set *ptlrpc_prep_set(void)
1001 {
1002         struct ptlrpc_request_set *set;
1003         int cpt;
1004
1005         ENTRY;
1006         cpt = cfs_cpt_current(cfs_cpt_table, 0);
1007         OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof(*set));
1008         if (!set)
1009                 RETURN(NULL);
1010         atomic_set(&set->set_refcount, 1);
1011         INIT_LIST_HEAD(&set->set_requests);
1012         init_waitqueue_head(&set->set_waitq);
1013         atomic_set(&set->set_new_count, 0);
1014         atomic_set(&set->set_remaining, 0);
1015         spin_lock_init(&set->set_new_req_lock);
1016         INIT_LIST_HEAD(&set->set_new_requests);
1017         set->set_max_inflight = UINT_MAX;
1018         set->set_producer     = NULL;
1019         set->set_producer_arg = NULL;
1020         set->set_rc           = 0;
1021
1022         RETURN(set);
1023 }
1024 EXPORT_SYMBOL(ptlrpc_prep_set);
1025
1026 /**
1027  * Allocate and initialize new request set structure with flow control
1028  * extension. This extension allows control over the number of requests in flight
1029  * for the whole set. A callback function to generate requests must be provided
1030  * and the request set will keep the number of requests sent over the wire to
1031  * @max_inflight.
1032  * Returns a pointer to the newly allocated set structure or NULL on error.
1033  */
1034 struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
1035                                              void *arg)
1036
1037 {
1038         struct ptlrpc_request_set *set;
1039
1040         set = ptlrpc_prep_set();
1041         if (!set)
1042                 RETURN(NULL);
1043
1044         set->set_max_inflight  = max;
1045         set->set_producer      = func;
1046         set->set_producer_arg  = arg;
1047
1048         RETURN(set);
1049 }
1050
1051 /**
1052  * Wind down and free request set structure previously allocated with
1053  * ptlrpc_prep_set.
1054  * Ensures that all requests on the set have completed and removes
1055  * all requests from the request list in a set.
1056  * If any unsent requests happen to be on the list, pretends that they got
1057  * an error in flight and calls their completion handler.
1058  */
1059 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
1060 {
1061         struct list_head *tmp;
1062         struct list_head *next;
1063         int expected_phase;
1064         int n = 0;
1065
1066         ENTRY;
1067
1068         /* Requests on the set should either all be completed, or all be new */
1069         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
1070                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
1071         list_for_each(tmp, &set->set_requests) {
1072                 struct ptlrpc_request *req =
1073                         list_entry(tmp, struct ptlrpc_request,
1074                                    rq_set_chain);
1075
1076                 LASSERT(req->rq_phase == expected_phase);
1077                 n++;
1078         }
1079
1080         LASSERTF(atomic_read(&set->set_remaining) == 0 ||
1081                  atomic_read(&set->set_remaining) == n, "%d / %d\n",
1082                  atomic_read(&set->set_remaining), n);
1083
1084         list_for_each_safe(tmp, next, &set->set_requests) {
1085                 struct ptlrpc_request *req =
1086                         list_entry(tmp, struct ptlrpc_request,
1087                                    rq_set_chain);
1088                 list_del_init(&req->rq_set_chain);
1089
1090                 LASSERT(req->rq_phase == expected_phase);
1091
1092                 if (req->rq_phase == RQ_PHASE_NEW) {
1093                         ptlrpc_req_interpret(NULL, req, -EBADR);
1094                         atomic_dec(&set->set_remaining);
1095                 }
1096
1097                 spin_lock(&req->rq_lock);
1098                 req->rq_set = NULL;
1099                 req->rq_invalid_rqset = 0;
1100                 spin_unlock(&req->rq_lock);
1101
1102                 ptlrpc_req_finished(req);
1103         }
1104
1105         LASSERT(atomic_read(&set->set_remaining) == 0);
1106
1107         ptlrpc_reqset_put(set);
1108         EXIT;
1109 }
1110 EXPORT_SYMBOL(ptlrpc_set_destroy);
1111
1112 /**
1113  * Add a new request to the general purpose request set.
1114  * Assumes request reference from the caller.
1115  */
1116 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
1117                         struct ptlrpc_request *req)
1118 {
1119         LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
1120         LASSERT(list_empty(&req->rq_set_chain));
1121
1122         if (req->rq_allow_intr)
1123                 set->set_allow_intr = 1;
1124
1125         /* The set takes over the caller's request reference */
1126         list_add_tail(&req->rq_set_chain, &set->set_requests);
1127         req->rq_set = set;
1128         atomic_inc(&set->set_remaining);
1129         req->rq_queued_time = ktime_get_seconds();
1130
1131         if (req->rq_reqmsg)
1132                 lustre_msg_set_jobid(req->rq_reqmsg, NULL);
1133
1134         if (set->set_producer)
1135                 /*
1136                  * If the request set has a producer callback, the RPC must be
1137                  * sent straight away
1138                  */
1139                 ptlrpc_send_new_req(req);
1140 }
1141 EXPORT_SYMBOL(ptlrpc_set_add_req);
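/*
 * Illustrative sketch (not part of the original file): the common synchronous
 * pattern built on request sets.  The exact ptlrpc_set_wait() signature varies
 * between branches, so treat this as an outline of the flow rather than a
 * drop-in snippet.
 *
 *	set = ptlrpc_prep_set();
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req);
 *	rc = ptlrpc_set_wait(set);
 *	ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_add_req() takes over the caller's request reference,
 * ptlrpc_set_wait() sends everything and waits for completion, and
 * ptlrpc_set_destroy() drops the per-request references again.
 */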
1142
1143 /**
1144  * Add a request to a request set with a dedicated server thread
1145  * and wake the thread to do any necessary processing.
1146  * Currently only used for ptlrpcd.
1147  */
1148 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
1149                             struct ptlrpc_request *req)
1150 {
1151         struct ptlrpc_request_set *set = pc->pc_set;
1152         int count, i;
1153
1154         LASSERT(req->rq_set == NULL);
1155         LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
1156
1157         spin_lock(&set->set_new_req_lock);
1158         /*
1159          * The set takes over the caller's request reference.
1160          */
1161         req->rq_set = set;
1162         req->rq_queued_time = ktime_get_seconds();
1163         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
1164         count = atomic_inc_return(&set->set_new_count);
1165         spin_unlock(&set->set_new_req_lock);
1166
1167         /* Only need to call wakeup once for the first entry. */
1168         if (count == 1) {
1169                 wake_up(&set->set_waitq);
1170
1171                 /*
1172                  * XXX: It may be unnecessary to wake up all the partners. But to
1173                  *      guarantee the async RPC can be processed ASAP, we have
1174                  *      no better choice. It may be fixed in the future.
1175                  */
1176                 for (i = 0; i < pc->pc_npartners; i++)
1177                         wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
1178         }
1179 }
1180
1181 /**
1182  * Based on the current state of the import, determine if the request
1183  * can be sent, is an error, or should be delayed.
1184  *
1185  * Returns true if this request should be delayed. If false, and
1186  * *status is set, then the request cannot be sent and *status is the
1187  * error code.  If false and status is 0, then the request can be sent.
1188  *
1189  * The imp->imp_lock must be held.
1190  */
1191 static int ptlrpc_import_delay_req(struct obd_import *imp,
1192                                    struct ptlrpc_request *req, int *status)
1193 {
1194         int delay = 0;
1195
1196         ENTRY;
1197         LASSERT(status);
1198         *status = 0;
1199
1200         if (req->rq_ctx_init || req->rq_ctx_fini) {
1201                 /* always allow ctx init/fini rpc go through */
1202         } else if (imp->imp_state == LUSTRE_IMP_NEW) {
1203                 DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
1204                 *status = -EIO;
1205         } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
1206                 unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
1207
1208                 /*
1209                  * pings or MDS-equivalent STATFS may safely
1210                  * race with umount
1211                  */
1212                 DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
1213                           D_HA : D_ERROR, req, "IMP_CLOSED ");
1214                 *status = -EIO;
1215         } else if (ptlrpc_send_limit_expired(req)) {
1216                 /* probably doesn't need to be a D_ERROR after initial testing */
1217                 DEBUG_REQ(D_HA, req, "send limit expired ");
1218                 *status = -ETIMEDOUT;
1219         } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
1220                    imp->imp_state == LUSTRE_IMP_CONNECTING) {
1221                 ;/* allow CONNECT even if import is invalid */
1222                 if (atomic_read(&imp->imp_inval_count) != 0) {
1223                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1224                         *status = -EIO;
1225                 }
1226         } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
1227                 if (!imp->imp_deactive)
1228                         DEBUG_REQ(D_NET, req, "IMP_INVALID");
1229                 *status = -ESHUTDOWN; /* b=12940 */
1230         } else if (req->rq_import_generation != imp->imp_generation) {
1231                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
1232                 *status = -EIO;
1233         } else if (req->rq_send_state != imp->imp_state) {
1234                 /* invalidate in progress - any requests should be dropped */
1235                 if (atomic_read(&imp->imp_inval_count) != 0) {
1236                         DEBUG_REQ(D_ERROR, req, "invalidate in flight");
1237                         *status = -EIO;
1238                 } else if (req->rq_no_delay &&
1239                            imp->imp_generation != imp->imp_initiated_at) {
1240                         /* ignore nodelay for requests initiating connections */
1241                         *status = -EWOULDBLOCK;
1242                 } else if (req->rq_allow_replay &&
1243                            (imp->imp_state == LUSTRE_IMP_REPLAY ||
1244                             imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
1245                             imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
1246                             imp->imp_state == LUSTRE_IMP_RECOVER)) {
1247                         DEBUG_REQ(D_HA, req, "allow during recovery.\n");
1248                 } else {
1249                         delay = 1;
1250                 }
1251         }
1252
1253         RETURN(delay);
1254 }
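/*
 * Illustrative sketch (not part of the original file): the send paths in this
 * file call this helper under imp_lock and branch on the (delay, *status)
 * pair roughly as follows:
 *
 *	spin_lock(&imp->imp_lock);
 *	delay = ptlrpc_import_delay_req(imp, req, &status);
 *	spin_unlock(&imp->imp_lock);
 *
 * If delay is true the request stays queued until the import changes state;
 * otherwise a non-zero status becomes req->rq_status and the request fails,
 * and status == 0 means the request may be handed to ptl_send_rpc().
 */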
1255
1256 /**
1257  * Decide if the error message should be printed to the console or not.
1258  * Makes its decision based on request type, status, and failure frequency.
1259  *
1260  * \param[in] req  request that failed and may need a console message
1261  *
1262  * \retval false if no message should be printed
1263  * \retval true  if console message should be printed
1264  */
1265 static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
1266 {
1267         LASSERT(req->rq_reqmsg != NULL);
1268
1269         /* Suppress particular reconnect errors which are to be expected. */
1270         if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
1271                 /* Suppress timed out reconnect requests */
1272                 if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
1273                     req->rq_timedout)
1274                         return false;
1275
1276                 /*
1277                  * Suppress most unavailable/again reconnect requests, but
1278                  * print occasionally so it is clear client is trying to
1279                  * connect to a server where no target is running.
1280                  */
1281                 if ((err == -ENODEV || err == -EAGAIN) &&
1282                     req->rq_import->imp_conn_cnt % 30 != 20)
1283                         return false;
1284         }
1285
1286         if (opc == LDLM_ENQUEUE && err == -EAGAIN)
1287                 /* -EAGAIN is normal when using POSIX flocks */
1288                 return false;
1289
1290         if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
1291             (req->rq_xid & 0xf) != 10)
1292                 /* Suppress most ping requests, they may fail occasionally */
1293                 return false;
1294
1295         return true;
1296 }
1297
1298 /**
1299  * Check request processing status.
1300  * Returns the status.
1301  */
1302 static int ptlrpc_check_status(struct ptlrpc_request *req)
1303 {
1304         int err;
1305
1306         ENTRY;
1307         err = lustre_msg_get_status(req->rq_repmsg);
1308         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
1309                 struct obd_import *imp = req->rq_import;
1310                 lnet_nid_t nid = imp->imp_connection->c_peer.nid;
1311                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1312
1313                 if (ptlrpc_console_allow(req, opc, err))
1314                         LCONSOLE_ERROR_MSG(0x11,
1315                                            "%s: operation %s to node %s failed: rc = %d\n",
1316                                            imp->imp_obd->obd_name,
1317                                            ll_opcode2str(opc),
1318                                            libcfs_nid2str(nid), err);
1319                 RETURN(err < 0 ? err : -EINVAL);
1320         }
1321
1322         if (err < 0) {
1323                 DEBUG_REQ(D_INFO, req, "status is %d", err);
1324         } else if (err > 0) {
1325                 /* XXX: translate this error from net to host */
1326                 DEBUG_REQ(D_INFO, req, "status is %d", err);
1327         }
1328
1329         RETURN(err);
1330 }
1331
1332 /**
1333  * Save pre-versions of objects into the request for replay.
1334  * Versions are obtained from the server reply.
1335  * Used for VBR.
1336  */
1337 static void ptlrpc_save_versions(struct ptlrpc_request *req)
1338 {
1339         struct lustre_msg *repmsg = req->rq_repmsg;
1340         struct lustre_msg *reqmsg = req->rq_reqmsg;
1341         __u64 *versions = lustre_msg_get_versions(repmsg);
1342
1343         ENTRY;
1344         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
1345                 return;
1346
1347         LASSERT(versions);
1348         lustre_msg_set_versions(reqmsg, versions);
1349         CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
1350                versions[0], versions[1]);
1351
1352         EXIT;
1353 }
1354
1355 __u64 ptlrpc_known_replied_xid(struct obd_import *imp)
1356 {
1357         struct ptlrpc_request *req;
1358
1359         assert_spin_locked(&imp->imp_lock);
1360         if (list_empty(&imp->imp_unreplied_list))
1361                 return 0;
1362
1363         req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
1364                          rq_unreplied_list);
1365         LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
1366
1367         if (imp->imp_known_replied_xid < req->rq_xid - 1)
1368                 imp->imp_known_replied_xid = req->rq_xid - 1;
1369
1370         return req->rq_xid - 1;
1371 }
1372
1373 /**
1374  * Callback function called when client receives RPC reply for \a req.
1375  * Returns 0 on success or error code.
1376  * The return value would be assigned to req->rq_status by the caller
1377  * as request processing status.
1378  * This function also decides if the request needs to be saved for later replay.
1379  */
1380 static int after_reply(struct ptlrpc_request *req)
1381 {
1382         struct obd_import *imp = req->rq_import;
1383         struct obd_device *obd = req->rq_import->imp_obd;
1384         ktime_t work_start;
1385         u64 committed;
1386         s64 timediff;
1387         int rc;
1388
1389         ENTRY;
1390         LASSERT(obd != NULL);
1391         /* repbuf must be unlinked */
1392         LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
1393
1394         if (req->rq_reply_truncated) {
1395                 if (ptlrpc_no_resend(req)) {
1396                         DEBUG_REQ(D_ERROR, req,
1397                                   "reply buffer overflow, expected: %d, actual size: %d",
1398                                   req->rq_nob_received, req->rq_repbuf_len);
1399                         RETURN(-EOVERFLOW);
1400                 }
1401
1402                 sptlrpc_cli_free_repbuf(req);
1403                 /*
1404                  * Pass the required reply buffer size (including
1405                  * space for early reply).
1406                  * NB: no need to round up because alloc_repbuf
1407                  * will round it up.
1408                  */
1409                 req->rq_replen = req->rq_nob_received;
1410                 req->rq_nob_received = 0;
1411                 spin_lock(&req->rq_lock);
1412                 req->rq_resend       = 1;
1413                 spin_unlock(&req->rq_lock);
1414                 RETURN(0);
1415         }
1416
1417         work_start = ktime_get_real();
1418         timediff = ktime_us_delta(work_start, req->rq_sent_ns);
1419
1420         /*
1421          * NB Until this point, the whole of the incoming message,
1422          * including buflens, status etc is in the sender's byte order.
1423          */
1424         rc = sptlrpc_cli_unwrap_reply(req);
1425         if (rc) {
1426                 DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
1427                 RETURN(rc);
1428         }
1429
1430         /*
1431          * Security layer unwrap might ask to resend this request.
1432          */
1433         if (req->rq_resend)
1434                 RETURN(0);
1435
1436         rc = unpack_reply(req);
1437         if (rc)
1438                 RETURN(rc);
1439
1440         /* retry indefinitely on EINPROGRESS */
1441         if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
1442             ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
1443                 time64_t now = ktime_get_real_seconds();
1444
1445                 DEBUG_REQ(req->rq_nr_resend > 0 ? D_ERROR : D_RPCTRACE, req,
1446                           "Resending request on EINPROGRESS");
1447                 spin_lock(&req->rq_lock);
1448                 req->rq_resend = 1;
1449                 spin_unlock(&req->rq_lock);
1450                 req->rq_nr_resend++;
1451
1452                 /* Readjust the timeout for current conditions */
1453                 ptlrpc_at_set_req_timeout(req);
1454                 /*
1455                  * delay resend to give a chance to the server to get ready.
1456                  * The delay is increased by 1s on every resend and is capped to
1457                  * the current request timeout (i.e. obd_timeout if AT is off,
1458                  * or AT service time x 125% + 5s, see at_est2timeout)
1459                  */
1460                 if (req->rq_nr_resend > req->rq_timeout)
1461                         req->rq_sent = now + req->rq_timeout;
1462                 else
1463                         req->rq_sent = now + req->rq_nr_resend;
1464
1465                 /* Resend for EINPROGRESS will use a new XID */
1466                 spin_lock(&imp->imp_lock);
1467                 list_del_init(&req->rq_unreplied_list);
1468                 spin_unlock(&imp->imp_lock);
1469
1470                 RETURN(0);
1471         }
1472
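             /*
              * Account the request round-trip latency (microseconds since
              * rq_sent_ns) in the import's obd service stats.
              */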
1473         if (obd->obd_svc_stats) {
1474                 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1475                                     timediff);
1476                 ptlrpc_lprocfs_rpc_sent(req, timediff);
1477         }
1478
1479         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1480             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1481                 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1482                           lustre_msg_get_type(req->rq_repmsg));
1483                 RETURN(-EPROTO);
1484         }
1485
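             /* fault injection: optionally pause reply handling for non-PING RPCs */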
1486         if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
1487                 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
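             /*
              * Update the adaptive timeout estimates from the timeout and
              * service time the server reported in the reply.
              */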
1488         ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1489         ptlrpc_at_adj_net_latency(req,
1490                                   lustre_msg_get_service_time(req->rq_repmsg));
1491
1492         rc = ptlrpc_check_status(req);
1493
1494         if (rc) {
1495                 /*
1496                  * Either we've been evicted, or the server has failed for
1497                  * some reason. Try to reconnect, and if that fails, punt to
1498                  * the upcall.
1499                  */
1500                 if (ptlrpc_recoverable_error(rc)) {
1501                         if (req->rq_send_state != LUSTRE_IMP_FULL ||
1502                             imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1503                                 RETURN(rc);
1504                         }
1505                         ptlrpc_request_handle_notconn(req);
1506                         RETURN(rc);
1507                 }
1508         } else {
1509                 /*
1510                  * Check whether the server sent an SLV (server lock
1511                  * volume). Do it only for RPCs with rc == 0.
1512                  */
1513                 ldlm_cli_update_pool(req);
1514         }
1515
1516         /*
1517          * Store transno in reqmsg for replay.
1518          */
1519         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1520                 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1521                 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1522         }
1523
1524         if (imp->imp_replayable) {
1525                 spin_lock(&imp->imp_lock);
1526                 /*
1527                  * No point in adding already-committed requests to the replay
1528                  * list; we would just remove them immediately. b=9829
1529                  */
1530                 if (req->rq_transno != 0 &&
1531                     (req->rq_transno >
1532                      lustre_msg_get_last_committed(req->rq_repmsg) ||
1533                      req->rq_replay)) {
1534                         /** version recovery */
1535                         ptlrpc_save_versions(req);
1536                         ptlrpc_retain_replayable_request(req, imp);
1537                 } else if (req->rq_commit_cb &&
1538                            list_empty(&req->rq_replay_list)) {
1539                         /*
1540                          * NB: don't call rq_commit_cb if it's already on
1541                          * rq_replay_list, ptlrpc_free_committed() will call
1542                          * it later, see LU-3618 for details
1543                          */
1544                         spin_unlock(&imp->imp_lock);
1545                         req->rq_commit_cb(req);
1546                         spin_lock(&imp->imp_lock);
1547                 }
1548
1549                 /*
1550                  * Replay-enabled imports return commit-status information.
1551                  */
1552                 committed = lustre_msg_get_last_committed(req->rq_repmsg);
1553                 if (likely(committed > imp->imp_peer_committed_transno))
1554                         imp->imp_peer_committed_transno = committed;
1555
1556                 ptlrpc_free_committed(imp);
1557
1558                 if (!list_empty(&imp->imp_replay_list)) {
1559                         struct ptlrpc_request *last;
1560
1561                         last = list_entry(imp->imp_replay_list.prev,
1562                                           struct ptlrpc_request,
1563                                           rq_replay_list);
1564                         /*
1565                          * Requests with rq_replay stay on the list even if no
1566                          * commit is expected.
1567                          */
1568                         if (last->rq_transno > imp->imp_peer_committed_transno)
1569                                 ptlrpc_pinger_commit_expected(imp);
1570                 }
1571
1572                 spin_unlock(&imp->imp_lock);
1573         }
1574
1575         RETURN(rc);
1576 }
1577
1578 /**
1579  * Helper function to send request \a req over the network for the first time
1580  * Also adjusts request phase.
1581  * Returns 0 on success or error code.
1582  */
1583 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1584 {
1585         struct obd_import *imp = req->rq_import;
1586         __u64 min_xid = 0;
1587         int rc;
1588
1589         ENTRY;
1590         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1591
1592         /* do not try to go further if there is not enough memory in enc_pool */
1593         if (req->rq_sent && req->rq_bulk)
1594                 if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
1595                     pool_is_at_full_capacity())
1596                         RETURN(-ENOMEM);
1597
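             /*
              * delayed send: rq_sent is in the future, so it is not yet
              * time to (re)issue this request
              */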
1598         if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
1599             (!req->rq_generation_set ||
1600              req->rq_import_generation == imp->imp_generation))
1601                 RETURN(0);
1602
1603         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1604
1605         spin_lock(&imp->imp_lock);
1606
1607         LASSERT(req->rq_xid != 0);
1608         LASSERT(!list_empty(&req->rq_unreplied_list));
1609
1610         if (!req->rq_generation_set)
1611                 req->rq_import_generation = imp->imp_generation;
1612
1613         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1614                 spin_lock(&req->rq_lock);
1615                 req->rq_waiting = 1;
1616                 spin_unlock(&req->rq_lock);
1617
1618                 DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
1619                           ptlrpc_import_state_name(req->rq_send_state),
1620                           ptlrpc_import_state_name(imp->imp_state));
1621                 LASSERT(list_empty(&req->rq_list));
1622                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1623                 atomic_inc(&req->rq_import->imp_inflight);
1624                 spin_unlock(&imp->imp_lock);
1625                 RETURN(0);
1626         }
1627
1628         if (rc != 0) {
1629                 spin_unlock(&imp->imp_lock);
1630                 req->rq_status = rc;
1631                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1632                 RETURN(rc);
1633         }
1634
1635         LASSERT(list_empty(&req->rq_list));
1636         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1637         atomic_inc(&req->rq_import->imp_inflight);
1638
1639         /*
1640          * Find the known replied XID from the unreplied list. CONNECT
1641          * and DISCONNECT requests are skipped to keep the sanity check
1642          * on the server side happy, see process_req_last_xid().
1643          *
1644          * For CONNECT: because replay requests have lower XIDs, the
1645          * sanity check would break if CONNECT bumped exp_last_xid on
1646          * the server.
1647          *
1648          * For DISCONNECT: since the client aborts inflight RPCs before
1649          * sending DISCONNECT, DISCONNECT may carry an XID higher than
1650          * those inflight RPCs.
1651          */
1652         if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
1653                 min_xid = ptlrpc_known_replied_xid(imp);
1654         spin_unlock(&imp->imp_lock);
1655
1656         lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
1657
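             /* the request status field carries the sender's PID for RPC tracing */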
1658         lustre_msg_set_status(req->rq_reqmsg, current_pid());
1659
1660         rc = sptlrpc_req_refresh_ctx(req, -1);
1661         if (rc) {
1662                 if (req->rq_err) {
1663                         req->rq_status = rc;
1664                         RETURN(1);
1665                 } else {
1666                         spin_lock(&req->rq_lock);
1667                         req->rq_wait_ctx = 1;
1668                         spin_unlock(&req->rq_lock);
1669                         RETURN(0);
1670                 }
1671         }
1672
1673         CDEBUG(D_RPCTRACE,
1674                "Sending RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
1675                req, current_comm(),
1676                imp->imp_obd->obd_uuid.uuid,
1677                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1678                obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg),
1679                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
1680
1681         rc = ptl_send_rpc(req, 0);
1682         if (rc == -ENOMEM) {
1683                 spin_lock(&imp->imp_lock);
1684                 if (!list_empty(&req->rq_list)) {
1685                         list_del_init(&req->rq_list);
1686                         atomic_dec(&req->rq_import->imp_inflight);
1687                 }
1688                 spin_unlock(&imp->imp_lock);
1689                 ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
1690                 RETURN(rc);
1691         }
1692         if (rc) {
1693                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
1694                 spin_lock(&req->rq_lock);
1695                 req->rq_net_err = 1;
1696                 spin_unlock(&req->rq_lock);
1697                 RETURN(rc);
1698         }
1699         RETURN(0);
1700 }
1701
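/**
 * Ask the set's producer callback to queue more requests until the set
 * reaches its max_inflight limit.  Returns the number of requests added,
 * or 0 once the producer reports that nothing is left to produce.
 */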
1702 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1703 {
1704         int remaining, rc;
1705
1706         ENTRY;
1707         LASSERT(set->set_producer != NULL);
1708
1709         remaining = atomic_read(&set->set_remaining);
1710
1711         /*
1712          * populate the ->set_requests list with requests until we
1713          * reach the maximum number of RPCs in flight for this set
1714          */
1715         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1716                 rc = set->set_producer(set, set->set_producer_arg);
1717                 if (rc == -ENOENT) {
1718                         /* no more RPC to produce */
1719                         set->set_producer     = NULL;
1720                         set->set_producer_arg = NULL;
1721                         RETURN(0);
1722                 }
1723         }
1724
1725         RETURN((atomic_read(&set->set_remaining) - remaining));
1726 }
1727
1728 /**
1729  * this sends any unsent RPCs in \a set and returns 1 if all are sent
1730  * and no more replies are expected.
1731  * (It is possible to get fewer replies than requests sent, e.g. due to
1732  * timed out requests or requests that we had trouble sending out.)
1733  *
1734  * NOTE: This function contains a potential schedule point (cond_resched()).
1735  */
1736 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1737 {
1738         struct list_head *tmp, *next;
1739         struct list_head  comp_reqs;
1740         int force_timer_recalc = 0;
1741
1742         ENTRY;
1743         if (atomic_read(&set->set_remaining) == 0)
1744                 RETURN(1);
1745
1746         INIT_LIST_HEAD(&comp_reqs);
1747         list_for_each_safe(tmp, next, &set->set_requests) {
1748                 struct ptlrpc_request *req =
1749                         list_entry(tmp, struct ptlrpc_request,
1750                                    rq_set_chain);
1751                 struct obd_import *imp = req->rq_import;
1752                 int unregistered = 0;
1753                 int async = 1;
1754                 int rc = 0;
1755
1756                 if (req->rq_phase == RQ_PHASE_COMPLETE) {
1757                         list_move_tail(&req->rq_set_chain, &comp_reqs);
1758                         continue;
1759                 }
1760
1761                 /*
1762                  * This schedule point is mainly for the ptlrpcd caller of this
1763                  * function.  Most ptlrpc sets are not long-lived and unbounded
1764                  * in length, but at the least the set used by the ptlrpcd is.
1765                  * Since the processing time is unbounded, we need to insert an
1766                  * explicit schedule point to make the thread well-behaved.
1767                  */
1768                 cond_resched();
1769
1770                 /*
1771                  * If the caller allows this request to be interrupted
1772                  * (rq_allow_intr) and it has actually been interrupted
1773                  * (rq_intr), then move the request to RQ_PHASE_INTERPRET
1774                  * regardless of its current phase.
1775                  */
1776                 if (unlikely(req->rq_allow_intr && req->rq_intr)) {
1777                         req->rq_status = -EINTR;
1778                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1779
1780                         /*
1781                          * Since it will be interpreted now and we have to
1782                          * wait for the reply to be unlinked, use sync mode.
1783                          */
1784                         async = 0;
1785
1786                         GOTO(interpret, req->rq_status);
1787                 }
1788
1789                 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
1790                         force_timer_recalc = 1;
1791
1792                 /* delayed send - skip */
1793                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1794                         continue;
1795
1796                 /* delayed resend - skip */
1797                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1798                     req->rq_sent > ktime_get_real_seconds())
1799                         continue;
1800
1801                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1802                       req->rq_phase == RQ_PHASE_BULK ||
1803                       req->rq_phase == RQ_PHASE_INTERPRET ||
1804                       req->rq_phase == RQ_PHASE_UNREG_RPC ||
1805                       req->rq_phase == RQ_PHASE_UNREG_BULK)) {
1806                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1807                         LBUG();
1808                 }
1809
1810                 if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
1811                     req->rq_phase == RQ_PHASE_UNREG_BULK) {
1812                         LASSERT(req->rq_next_phase != req->rq_phase);
1813                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1814
1815                         if (req->rq_req_deadline &&
1816                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
1817                                 req->rq_req_deadline = 0;
1818                         if (req->rq_reply_deadline &&
1819                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
1820                                 req->rq_reply_deadline = 0;
1821                         if (req->rq_bulk_deadline &&
1822                             !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
1823                                 req->rq_bulk_deadline = 0;
1824
1825                         /*
1826                          * Skip processing until the reply is unlinked. We
1827                          * can't return the request to the pool or call
1828                          * interpret before that. We need to make sure
1829                          * that all RDMA transfers have finished and will
1830                          * not corrupt any data.
1831                          */
1832                         if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
1833                             ptlrpc_client_recv_or_unlink(req))
1834                                 continue;
1835                         if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
1836                             ptlrpc_client_bulk_active(req))
1837                                 continue;
1838
1839                         /*
1840                          * Turn fail_loc off to prevent it from looping
1841                          * forever.
1842                          */
1843                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1844                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1845                                                      OBD_FAIL_ONCE);
1846                         }
1847                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1848                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1849                                                      OBD_FAIL_ONCE);
1850                         }
1851
1852                         /*
1853                          * Move to next phase if reply was successfully
1854                          * unlinked.
1855                          */
1856                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1857                 }
1858
1859                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1860                         GOTO(interpret, req->rq_status);
1861
1862                 /*
1863                  * Note that this also will start async reply unlink.
1864                  */
1865                 if (req->rq_net_err && !req->rq_timedout) {
1866                         ptlrpc_expire_one_request(req, 1);
1867
1868                         /*
1869                          * Check if we still need to wait for unlink.
1870                          */
1871                         if (ptlrpc_client_recv_or_unlink(req) ||
1872                             ptlrpc_client_bulk_active(req))
1873                                 continue;
1874                         /* If there is no need to resend, fail it now. */
1875                         if (req->rq_no_resend) {
1876                                 if (req->rq_status == 0)
1877                                         req->rq_status = -EIO;
1878                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1879                                 GOTO(interpret, req->rq_status);
1880                         } else {
1881                                 continue;
1882                         }
1883                 }
1884
1885                 if (req->rq_err) {
1886                         spin_lock(&req->rq_lock);
1887                         req->rq_replied = 0;
1888                         spin_unlock(&req->rq_lock);
1889                         if (req->rq_status == 0)
1890                                 req->rq_status = -EIO;
1891                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1892                         GOTO(interpret, req->rq_status);
1893                 }
1894
1895                 /*
1896                  * ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
1897                  * so it sets rq_intr regardless of individual rpc
1898                  * timeouts. The synchronous IO waiting path sets
1899                  * rq_intr irrespective of whether ptlrpcd
1900                  * has seen a timeout.  Our policy is to only interpret
1901                  * interrupted rpcs after they have timed out, so we
1902                  * need to enforce that here.
1903                  */
1904
1905                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1906                                      req->rq_wait_ctx)) {
1907                         req->rq_status = -EINTR;
1908                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1909                         GOTO(interpret, req->rq_status);
1910                 }
1911
1912                 if (req->rq_phase == RQ_PHASE_RPC) {
1913                         if (req->rq_timedout || req->rq_resend ||
1914                             req->rq_waiting || req->rq_wait_ctx) {
1915                                 int status;
1916
1917                                 if (!ptlrpc_unregister_reply(req, 1)) {
1918                                         ptlrpc_unregister_bulk(req, 1);
1919                                         continue;
1920                                 }
1921
1922                                 spin_lock(&imp->imp_lock);
1923                                 if (ptlrpc_import_delay_req(imp, req,
1924                                                             &status)) {
1925                                         /*
1926                                          * put on the delayed list while we
1927                                          * wait for recovery, before sending
1928                                          */
1929                                         list_move_tail(&req->rq_list,
1930                                                        &imp->imp_delayed_list);
1931                                         spin_unlock(&imp->imp_lock);
1932                                         continue;
1933                                 }
1934
1935                                 if (status != 0)  {
1936                                         req->rq_status = status;
1937                                         ptlrpc_rqphase_move(req,
1938                                                             RQ_PHASE_INTERPRET);
1939                                         spin_unlock(&imp->imp_lock);
1940                                         GOTO(interpret, req->rq_status);
1941                                 }
1942                                 /* ignore on just initiated connections */
1943                                 if (ptlrpc_no_resend(req) &&
1944                                     !req->rq_wait_ctx &&
1945                                     imp->imp_generation !=
1946                                     imp->imp_initiated_at) {
1947                                         req->rq_status = -ENOTCONN;
1948                                         ptlrpc_rqphase_move(req,
1949                                                             RQ_PHASE_INTERPRET);
1950                                         spin_unlock(&imp->imp_lock);
1951                                         GOTO(interpret, req->rq_status);
1952                                 }
1953
1954                                 list_move_tail(&req->rq_list,
1955                                                &imp->imp_sending_list);
1956
1957                                 spin_unlock(&imp->imp_lock);
1958
1959                                 spin_lock(&req->rq_lock);
1960                                 req->rq_waiting = 0;
1961                                 spin_unlock(&req->rq_lock);
1962
1963                                 if (req->rq_timedout || req->rq_resend) {
1964                                         /*
1965                                          * This is re-sending anyway,
1966                                          * so mark the req as a resend.
1967                                          */
1968                                         spin_lock(&req->rq_lock);
1969                                         req->rq_resend = 1;
1970                                         spin_unlock(&req->rq_lock);
1971                                 }
1972                                 /*
1973                                  * rq_wait_ctx is only touched by ptlrpcd,
1974                                  * so no lock is needed here.
1975                                  */
1976                                 status = sptlrpc_req_refresh_ctx(req, -1);
1977                                 if (status) {
1978                                         if (req->rq_err) {
1979                                                 req->rq_status = status;
1980                                                 spin_lock(&req->rq_lock);
1981                                                 req->rq_wait_ctx = 0;
1982                                                 spin_unlock(&req->rq_lock);
1983                                                 force_timer_recalc = 1;
1984                                         } else {
1985                                                 spin_lock(&req->rq_lock);
1986                                                 req->rq_wait_ctx = 1;
1987                                                 spin_unlock(&req->rq_lock);
1988                                         }
1989
1990                                         continue;
1991                                 } else {
1992                                         spin_lock(&req->rq_lock);
1993                                         req->rq_wait_ctx = 0;
1994                                         spin_unlock(&req->rq_lock);
1995                                 }
1996
1997                                 /*
1998                                  * In any case, the previous bulk should be
1999                                  * cleaned up to prepare for the new sending
2000                                  */
2001                                 if (req->rq_bulk &&
2002                                     !ptlrpc_unregister_bulk(req, 1))
2003                                         continue;
2004
2005                                 rc = ptl_send_rpc(req, 0);
2006                                 if (rc == -ENOMEM) {
2007                                         spin_lock(&imp->imp_lock);
2008                                         if (!list_empty(&req->rq_list))
2009                                                 list_del_init(&req->rq_list);
2010                                         spin_unlock(&imp->imp_lock);
2011                                         ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
2012                                         continue;
2013                                 }
2014                                 if (rc) {
2015                                         DEBUG_REQ(D_HA, req,
2016                                                   "send failed: rc = %d", rc);
2017                                         force_timer_recalc = 1;
2018                                         spin_lock(&req->rq_lock);
2019                                         req->rq_net_err = 1;
2020                                         spin_unlock(&req->rq_lock);
2021                                         continue;
2022                                 }
2023                                 /* need to reset the timeout */
2024                                 force_timer_recalc = 1;
2025                         }
2026
2027                         spin_lock(&req->rq_lock);
2028
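                            /*
                             * An early reply only updates the timeout
                             * information; keep waiting for the real reply.
                             */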
2029                         if (ptlrpc_client_early(req)) {
2030                                 ptlrpc_at_recv_early_reply(req);
2031                                 spin_unlock(&req->rq_lock);
2032                                 continue;
2033                         }
2034
2035                         /* Still waiting for a reply? */
2036                         if (ptlrpc_client_recv(req)) {
2037                                 spin_unlock(&req->rq_lock);
2038                                 continue;
2039                         }
2040
2041                         /* Did we actually receive a reply? */
2042                         if (!ptlrpc_client_replied(req)) {
2043                                 spin_unlock(&req->rq_lock);
2044                                 continue;
2045                         }
2046
2047                         spin_unlock(&req->rq_lock);
2048
2049                         /*
2050                          * unlink from the net because we are going to
2051                          * swab the reply buffer in place
2052                          */
2053                         unregistered = ptlrpc_unregister_reply(req, 1);
2054                         if (!unregistered)
2055                                 continue;
2056
2057                         req->rq_status = after_reply(req);
2058                         if (req->rq_resend)
2059                                 continue;
2060
2061                         /*
2062                          * If there is no bulk associated with this request,
2063                          * then we're done and should let the interpreter
2064                          * process the reply. Similarly if the RPC returned
2065                          * an error, and therefore the bulk will never arrive.
2066                          */
2067                         if (!req->rq_bulk || req->rq_status < 0) {
2068                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2069                                 GOTO(interpret, req->rq_status);
2070                         }
2071
2072                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
2073                 }
2074
2075                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
2076                 if (ptlrpc_client_bulk_active(req))
2077                         continue;
2078
2079                 if (req->rq_bulk->bd_failure) {
2080                         /*
2081                          * The RPC reply arrived OK, but the bulk screwed
2082                          * up!  Dead weird since the server told us the RPC
2083                          * was good after getting the REPLY for her GET or
2084                          * the ACK for her PUT.
2085                          */
2086                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
2087                         req->rq_status = -EIO;
2088                 }
2089
2090                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2091
2092 interpret:
2093                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
2094
2095                 /*
2096                  * This moves to the "unregistering" phase, where we need
2097                  * to wait for the reply to be unlinked.
2098                  */
2099                 if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
2100                         /* start async bulk unlink too */
2101                         ptlrpc_unregister_bulk(req, 1);
2102                         continue;
2103                 }
2104
2105                 if (!ptlrpc_unregister_bulk(req, async))
2106                         continue;
2107
2108                 /*
2109                  * By the time interpret is called, receiving should
2110                  * already be finished.
2111                  */
2112                 LASSERT(!req->rq_receiving_reply);
2113
2114                 ptlrpc_req_interpret(env, req, req->rq_status);
2115
2116                 if (ptlrpcd_check_work(req)) {
2117                         atomic_dec(&set->set_remaining);
2118                         continue;
2119                 }
2120                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
2121
2122                 if (req->rq_reqmsg)
2123                         CDEBUG(D_RPCTRACE,
2124                                "Completed RPC req@%p pname:cluuid:pid:xid:nid:opc:job %s:%s:%d:%llu:%s:%d:%s\n",
2125                                req, current_comm(),
2126                                imp->imp_obd->obd_uuid.uuid,
2127                                lustre_msg_get_status(req->rq_reqmsg),
2128                                req->rq_xid,
2129                                obd_import_nid2str(imp),
2130                                lustre_msg_get_opc(req->rq_reqmsg),
2131                                lustre_msg_get_jobid(req->rq_reqmsg) ?: "");
2132
2133                 spin_lock(&imp->imp_lock);
2134                 /*
2135                  * The request may no longer be on the sending or delayed
2136                  * list. This can happen when it was marked erroneous because
2137                  * ptlrpc_import_delay_req(req, status) found it impossible
2138                  * to allow sending this RPC and returned *status != 0.
2139                  */
2140                 if (!list_empty(&req->rq_list)) {
2141                         list_del_init(&req->rq_list);
2142                         atomic_dec(&imp->imp_inflight);
2143                 }
2144                 list_del_init(&req->rq_unreplied_list);
2145                 spin_unlock(&imp->imp_lock);
2146
2147                 atomic_dec(&set->set_remaining);
2148                 wake_up_all(&imp->imp_recovery_waitq);
2149
2150                 if (set->set_producer) {
2151                         /* produce a new request if possible */
2152                         if (ptlrpc_set_producer(set) > 0)
2153                                 force_timer_recalc = 1;
2154
2155                         /*
2156                          * free the request that has just been completed
2157                          * in order not to pollute set->set_requests
2158                          */
2159                         list_del_init(&req->rq_set_chain);
2160                         spin_lock(&req->rq_lock);
2161                         req->rq_set = NULL;
2162                         req->rq_invalid_rqset = 0;
2163                         spin_unlock(&req->rq_lock);
2164
2165                         /* record rq_status to compute the final status later */
2166                         if (req->rq_status != 0)
2167                                 set->set_rc = req->rq_status;
2168                         ptlrpc_req_finished(req);
2169                 } else {
2170                         list_move_tail(&req->rq_set_chain, &comp_reqs);
2171                 }
2172         }
2173
2174         /*
2175          * move completed requests to the head of the list so it's easier
2176          * for the caller to find them
2177          */
2178         list_splice(&comp_reqs, &set->set_requests);
2179
2180         /* If we hit an error, we want to recover promptly. */
2181         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
2182 }
2183 EXPORT_SYMBOL(ptlrpc_check_set);
2184
2185 /**
2186  * Time out request \a req. If \a async_unlink is set, do not wait
2187  * until LNet actually confirms network buffer unlinking.
2188  * Return 1 if we should give up further retrying attempts or 0 otherwise.
2189  */
2190 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
2191 {
2192         struct obd_import *imp = req->rq_import;
2193         unsigned int debug_mask = D_RPCTRACE;
2194         int rc = 0;
2195
2196         ENTRY;
2197         spin_lock(&req->rq_lock);
2198         req->rq_timedout = 1;
2199         spin_unlock(&req->rq_lock);
2200
2201         if (ptlrpc_console_allow(req, lustre_msg_get_opc(req->rq_reqmsg),
2202                                  lustre_msg_get_status(req->rq_reqmsg)))
2203                 debug_mask = D_WARNING;
2204         DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
2205                   req->rq_net_err ? "failed due to network error" :
2206                      ((req->rq_real_sent == 0 ||
2207                        req->rq_real_sent < req->rq_sent ||
2208                        req->rq_real_sent >= req->rq_deadline) ?
2209                       "timed out for sent delay" : "timed out for slow reply"),
2210                   (s64)req->rq_sent, (s64)req->rq_real_sent);
2211
2212         if (imp && obd_debug_peer_on_timeout)
2213                 LNetDebugPeer(imp->imp_connection->c_peer);
2214
2215         ptlrpc_unregister_reply(req, async_unlink);
2216         ptlrpc_unregister_bulk(req, async_unlink);
2217
2218         if (obd_dump_on_timeout)
2219                 libcfs_debug_dumplog();
2220
2221         if (!imp) {
2222                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
2223                 RETURN(1);
2224         }
2225
2226         atomic_inc(&imp->imp_timeouts);
2227
2228         /* The DLM server doesn't want recovery run on its imports. */
2229         if (imp->imp_dlm_fake)
2230                 RETURN(1);
2231
2232         /*
2233          * If this request is for recovery or other primordial tasks,
2234          * then error it out here.
2235          */
2236         if (req->rq_ctx_init || req->rq_ctx_fini ||
2237             req->rq_send_state != LUSTRE_IMP_FULL ||
2238             imp->imp_obd->obd_no_recov) {
2239                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
2240                           ptlrpc_import_state_name(req->rq_send_state),
2241                           ptlrpc_import_state_name(imp->imp_state));
2242                 spin_lock(&req->rq_lock);
2243                 req->rq_status = -ETIMEDOUT;
2244                 req->rq_err = 1;
2245                 spin_unlock(&req->rq_lock);
2246                 RETURN(1);
2247         }
2248
2249         /*
2250          * if a request can't be resent we can't wait for an answer after
2251          * the timeout
2252          */
2253         if (ptlrpc_no_resend(req)) {
2254                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
2255                 rc = 1;
2256         }
2257
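             /*
              * Kick the import into recovery; the conn_cnt from the request
              * guards against failing a newer connection than the one that
              * timed out.
              */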
2258         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
2259
2260         RETURN(rc);
2261 }
2262
2263 /**
2264  * Time out all uncompleted requests in the request set pointed to by \a data.
2265  * Callback used when waiting on sets with l_wait_event.
2266  * Always returns 1.
2267  */
2268 int ptlrpc_expired_set(void *data)
2269 {
2270         struct ptlrpc_request_set *set = data;
2271         struct list_head *tmp;
2272         time64_t now = ktime_get_real_seconds();
2273
2274         ENTRY;
2275         LASSERT(set != NULL);
2276
2277         /*
2278          * A timeout expired. See which reqs it applies to...
2279          */
2280         list_for_each(tmp, &set->set_requests) {
2281                 struct ptlrpc_request *req =
2282                         list_entry(tmp, struct ptlrpc_request,
2283                                    rq_set_chain);
2284
2285                 /* don't expire request waiting for context */
2286                 if (req->rq_wait_ctx)
2287                         continue;
2288
2289                 /* Request in-flight? */
2290                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2291                        !req->rq_waiting && !req->rq_resend) ||
2292                       (req->rq_phase == RQ_PHASE_BULK)))
2293                         continue;
2294
2295                 if (req->rq_timedout ||     /* already dealt with */
2296                     req->rq_deadline > now) /* not expired */
2297                         continue;
2298
2299                 /*
2300                  * Deal with this guy. Do it asynchronously so as not to
2301                  * block the ptlrpcd thread.
2302                  */
2303                 ptlrpc_expire_one_request(req, 1);
2304         }
2305
2306         /*
2307          * When waiting for a whole set, we always break out of the
2308          * sleep so we can recalculate the timeout, or enable interrupts
2309          * if everyone's timed out.
2310          */
2311         RETURN(1);
2312 }
2313
2314 /**
2315  * Sets rq_intr flag in \a req under spinlock.
2316  */
2317 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
2318 {
2319         spin_lock(&req->rq_lock);
2320         req->rq_intr = 1;
2321         spin_unlock(&req->rq_lock);
2322 }
2323 EXPORT_SYMBOL(ptlrpc_mark_interrupted);
2324
2325 /**
2326  * Interrupts (sets interrupted flag) all uncompleted requests in
2327  * a set \a data. Callback for l_wait_event for interruptible waits.
2328  */
2329 static void ptlrpc_interrupted_set(void *data)
2330 {
2331         struct ptlrpc_request_set *set = data;
2332         struct list_head *tmp;
2333
2334         LASSERT(set != NULL);
2335         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2336
2337         list_for_each(tmp, &set->set_requests) {
2338                 struct ptlrpc_request *req =
2339                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2340
2341                 if (req->rq_intr)
2342                         continue;
2343
2344                 if (req->rq_phase != RQ_PHASE_RPC &&
2345                     req->rq_phase != RQ_PHASE_UNREG_RPC &&
2346                     !req->rq_allow_intr)
2347                         continue;
2348
2349                 ptlrpc_mark_interrupted(req);
2350         }
2351 }
2352
2353 /**
2354  * Get the smallest timeout in the set; this does NOT set a timeout.
2355  */
2356 time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2357 {
2358         struct list_head *tmp;
2359         time64_t now = ktime_get_real_seconds();
2360         int timeout = 0;
2361         struct ptlrpc_request *req;
2362         time64_t deadline;
2363
2364         ENTRY;
2365         list_for_each(tmp, &set->set_requests) {
2366                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2367
2368                 /* Request in-flight? */
2369                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2370                       (req->rq_phase == RQ_PHASE_BULK) ||
2371                       (req->rq_phase == RQ_PHASE_NEW)))
2372                         continue;
2373
2374                 /* Already timed out. */
2375                 if (req->rq_timedout)
2376                         continue;
2377
2378                 /* Waiting for ctx. */
2379                 if (req->rq_wait_ctx)
2380                         continue;
2381
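                    /*
                     * NEW requests and delayed resends become eligible to
                     * (re)send at rq_sent; otherwise the deadline is the
                     * send time plus the request timeout.
                     */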
2382                 if (req->rq_phase == RQ_PHASE_NEW)
2383                         deadline = req->rq_sent;
2384                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2385                         deadline = req->rq_sent;
2386                 else
2387                         deadline = req->rq_sent + req->rq_timeout;
2388
2389                 if (deadline <= now)    /* actually expired already */
2390                         timeout = 1;    /* ASAP */
2391                 else if (timeout == 0 || timeout > deadline - now)
2392                         timeout = deadline - now;
2393         }
2394         RETURN(timeout);
2395 }
2396
2397 /**
2398  * Send all unsent requests from the set and then wait until all
2399  * requests in the set complete (either get a reply, time out, get an
2400  * error or otherwise be interrupted).
2401  * Returns 0 on success or error code otherwise.
2402  */
2403 int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set)
2404 {
2405         struct list_head *tmp;
2406         struct ptlrpc_request *req;
2407         struct l_wait_info lwi;
2408         time64_t timeout;
2409         int rc;
2410
2411         ENTRY;
2412         if (set->set_producer)
2413                 (void)ptlrpc_set_producer(set);
2414         else
2415                 list_for_each(tmp, &set->set_requests) {
2416                         req = list_entry(tmp, struct ptlrpc_request,
2417                                          rq_set_chain);
2418                         if (req->rq_phase == RQ_PHASE_NEW)
2419                                 (void)ptlrpc_send_new_req(req);
2420                 }
2421
2422         if (list_empty(&set->set_requests))
2423                 RETURN(0);
2424
2425         do {
2426                 timeout = ptlrpc_set_next_timeout(set);
2427
2428                 /*
2429                  * wait until all complete, interrupted, or an in-flight
2430                  * req times out
2431                  */
2432                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
2433                        set, timeout);
2434
2435                 if ((timeout == 0 && !signal_pending(current)) ||
2436                     set->set_allow_intr)
2437                         /*
2438                          * No requests are in-flight (either timed out
2439                          * or delayed), so we can allow interrupts.
2440                          * We still want to block for a limited time,
2441                          * so we allow interrupts during the timeout.
2442                          */
2443                         lwi = LWI_TIMEOUT_INTR_ALL(
2444                                         cfs_time_seconds(timeout ? timeout : 1),
2445                                         ptlrpc_expired_set,
2446                                         ptlrpc_interrupted_set, set);
2447                 else
2448                         /*
2449                          * At least one request is in flight, so no
2450                          * interrupts are allowed. Wait until all
2451                          * complete, or an in-flight req times out.
2452                          */
2453                         lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
2454                                           ptlrpc_expired_set, set);
2455
2456                 rc = l_wait_event(set->set_waitq,
2457                                   ptlrpc_check_set(NULL, set), &lwi);
2458
2459                 /*
2460                  * LU-769 - if we ignored the signal because it was already
2461                  * pending when we started, we need to handle it now or we risk
2462                  * it being ignored forever
2463                  */
2464                 if (rc == -ETIMEDOUT &&
2465                     (!lwi.lwi_allow_intr || set->set_allow_intr) &&
2466                     signal_pending(current)) {
2467                         sigset_t blocked_sigs =
2468                                            cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
2469
2470                         /*
2471                          * In fact we only interrupt for the "fatal" signals
2472                          * like SIGINT or SIGKILL. We still ignore less
2473                          * important signals since the ptlrpc set is not
2474                          * easily reentrant from userspace again.
2475                          */
2476                         if (signal_pending(current))
2477                                 ptlrpc_interrupted_set(set);
2478                         cfs_restore_sigs(blocked_sigs);
2479                 }
2480
2481                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2482
2483                 /*
2484                  * -EINTR => all requests have been flagged rq_intr so next
2485                  * check completes.
2486                  * -ETIMEDOUT => someone timed out.  When all reqs have
2487                  * timed out, signals are enabled allowing completion with
2488                  * EINTR.
2489                  * I don't really care if we go once more round the loop in
2490                  * the error cases -eeb.
2491                  */
2492                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2493                         list_for_each(tmp, &set->set_requests) {
2494                                 req = list_entry(tmp, struct ptlrpc_request,
2495                                                  rq_set_chain);
2496                                 spin_lock(&req->rq_lock);
2497                                 req->rq_invalid_rqset = 1;
2498                                 spin_unlock(&req->rq_lock);
2499                         }
2500                 }
2501         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2502
2503         LASSERT(atomic_read(&set->set_remaining) == 0);
2504
2505         rc = set->set_rc; /* rq_status of already freed requests if any */
2506         list_for_each(tmp, &set->set_requests) {
2507                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2508
2509                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2510                 if (req->rq_status != 0)
2511                         rc = req->rq_status;
2512         }
2513
2514         RETURN(rc);
2515 }
2516 EXPORT_SYMBOL(ptlrpc_set_wait);
2517
2518 /**
2519  * Helper function for request freeing.
2520  * Called when the request reference count reaches zero and the request
2521  * needs to be freed. Removes the request from all sending/replay lists
2522  * it might be on and frees network buffers if any are present.
2523  * If \a locked is set, the caller is already holding the import imp_lock
2524  * and so we do not need to reobtain it (for certain list manipulations).
2525  */
2526 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2527 {
2528         ENTRY;
2529
2530         if (!request)
2531                 RETURN_EXIT;
2532
2533         LASSERT(!request->rq_srv_req);
2534         LASSERT(request->rq_export == NULL);
2535         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2536         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
2537         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
2538         LASSERTF(!request->rq_replay, "req %p\n", request);
2539
2540         req_capsule_fini(&request->rq_pill);
2541
2542         /*
2543          * We must take it off the imp_replay_list first.  Otherwise, we'll set
2544          * request->rq_reqmsg to NULL while osc_close is dereferencing it.
2545          */
2546         if (request->rq_import) {
2547                 if (!locked)
2548                         spin_lock(&request->rq_import->imp_lock);
2549                 list_del_init(&request->rq_replay_list);
2550                 list_del_init(&request->rq_unreplied_list);
2551                 if (!locked)
2552                         spin_unlock(&request->rq_import->imp_lock);
2553         }
2554         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
2555
2556         if (atomic_read(&request->rq_refcount) != 0) {
2557                 DEBUG_REQ(D_ERROR, request,
2558                           "freeing request with nonzero refcount");
2559                 LBUG();
2560         }
2561
2562         if (request->rq_repbuf)
2563                 sptlrpc_cli_free_repbuf(request);
2564
2565         if (request->rq_import) {
2566                 class_import_put(request->rq_import);
2567                 request->rq_import = NULL;
2568         }
2569         if (request->rq_bulk)
2570                 ptlrpc_free_bulk(request->rq_bulk);
2571
2572         if (request->rq_reqbuf || request->rq_clrbuf)
2573                 sptlrpc_cli_free_reqbuf(request);
2574
2575         if (request->rq_cli_ctx)
2576                 sptlrpc_req_put_ctx(request, !locked);
2577
2578         if (request->rq_pool)
2579                 __ptlrpc_free_req_to_pool(request);
2580         else
2581                 ptlrpc_request_cache_free(request);
2582         EXIT;
2583 }
2584
2585 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2586 /**
2587  * Drop one request reference. Must be called with import imp_lock held.
2588  * When reference count drops to zero, request is freed.
2589  */
2590 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2591 {
2592         assert_spin_locked(&request->rq_import->imp_lock);
2593         (void)__ptlrpc_req_finished(request, 1);
2594 }
2595
2596 /**
2597  * Helper function
2598  * Drops one reference count for request \a request.
2599  * \a locked set indicates that the caller holds the import imp_lock.
2600  * Frees the request when the reference count reaches zero.
2601  *
2602  * \retval 1    the request is freed
2603  * \retval 0    some others still hold references on the request
2604  */
2605 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2606 {
2607         int count;
2608
2609         ENTRY;
2610         if (!request)
2611                 RETURN(1);
2612
2613         LASSERT(request != LP_POISON);
2614         LASSERT(request->rq_reqmsg != LP_POISON);
2615
2616         DEBUG_REQ(D_INFO, request, "refcount now %u",
2617                   atomic_read(&request->rq_refcount) - 1);
2618
2619         spin_lock(&request->rq_lock);
2620         count = atomic_dec_return(&request->rq_refcount);
2621         LASSERTF(count >= 0, "Invalid ref count %d\n", count);
2622
2623         /*
2624          * For an open RPC, the client does not know the EA size (LOV, ACL,
2625          * and so on) until the reply arrives, so it has to reserve a very
2626          * large reply buffer. Such a buffer is not released until the RPC
2627          * is freed. Since the open RPC is replayable, we need to keep it in
2628          * the replay list until close. If a lot of files are opened
2629          * concurrently, the client may run out of memory.
2630          *
2631          * In fact, it is unnecessary to keep the reply buffer for open
2632          * replay; the related EAs have already been saved via
2633          * mdc_save_lovea() before coming here. So it is safe to free the
2634          * reply buffer earlier, before releasing the RPC, to avoid OOM. LU-9514
2635          */
2636         if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
2637                 spin_lock(&request->rq_early_free_lock);
2638                 sptlrpc_cli_free_repbuf(request);
2639                 request->rq_repbuf = NULL;
2640                 request->rq_repbuf_len = 0;
2641                 request->rq_repdata = NULL;
2642                 request->rq_reqdata_len = 0;
2643                 spin_unlock(&request->rq_early_free_lock);
2644         }
2645         spin_unlock(&request->rq_lock);
2646
2647         if (!count)
2648                 __ptlrpc_free_req(request, locked);
2649
2650         RETURN(!count);
2651 }
2652
2653 /**
2654  * Drops one reference count for a request.
2655  */
2656 void ptlrpc_req_finished(struct ptlrpc_request *request)
2657 {
2658         __ptlrpc_req_finished(request, 0);
2659 }
2660 EXPORT_SYMBOL(ptlrpc_req_finished);
2661
2662 /**
2663  * Returns xid of a \a request
2664  */
2665 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2666 {
2667         return request->rq_xid;
2668 }
2669 EXPORT_SYMBOL(ptlrpc_req_xid);
2670
2671 /**
2672  * Disengage the client's reply buffer from the network
2673  * NB does _NOT_ unregister any client-side bulk.
2674  * IDEMPOTENT, but _not_ safe against concurrent callers.
2675  * The request owner (i.e. the thread doing the I/O) must call...
2676  * Returns 1 once the reply buffer is unlinked, or 0 if unlinking is still in progress.
2677  */
2678 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2679 {
2680         int rc;
2681         struct l_wait_info lwi;
2682
2683         /*
2684          * Might sleep.
2685          */
2686         LASSERT(!in_interrupt());
2687
2688         /* Let's set up a deadline for the reply unlink. */
2689         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2690             async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
2691                 request->rq_reply_deadline = ktime_get_real_seconds() +
2692                                              LONG_UNLINK;
2693
2694         /*
2695          * Nothing left to do.
2696          */
2697         if (!ptlrpc_client_recv_or_unlink(request))
2698                 RETURN(1);
2699
2700         LNetMDUnlink(request->rq_reply_md_h);
2701
2702         /*
2703          * Let's check it once again.
2704          */
2705         if (!ptlrpc_client_recv_or_unlink(request))
2706                 RETURN(1);
2707
2708         /* Move to "Unregistering" phase as reply was not unlinked yet. */
2709         ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
2710
2711         /*
2712          * Do not wait for unlink to finish.
2713          */
2714         if (async)
2715                 RETURN(0);
2716
2717         /*
2718          * We have to l_wait_event() whatever the result, to give liblustre
2719          * a chance to run reply_in_callback(), and to make sure we've
2720          * unlinked before returning a req to the pool.
2721          */
2722         for (;;) {
2723                 /* The wq argument is ignored by user-space wait_event macros */
2724                 wait_queue_head_t *wq = (request->rq_set) ?
2725                                         &request->rq_set->set_waitq :
2726                                         &request->rq_reply_waitq;
2727                 /*
2728                  * Network access will complete in finite time but the HUGE
2729                  * timeout lets us CWARN for visibility of sluggish NALs
2730                  */
2731                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2732                                            cfs_time_seconds(1), NULL, NULL);
2733                 rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
2734                                   &lwi);
2735                 if (rc == 0) {
2736                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2737                         RETURN(1);
2738                 }
2739
2740                 LASSERT(rc == -ETIMEDOUT);
2741                 DEBUG_REQ(D_WARNING, request,
2742                           "Unexpectedly long timeout receiving_reply=%d req_unlinked=%d reply_unlinked=%d",
2743                           request->rq_receiving_reply,
2744                           request->rq_req_unlinked,
2745                           request->rq_reply_unlinked);
2746         }
2747         RETURN(0);
2748 }
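/*
 * Illustrative caller sketch (not taken verbatim from this file): users in a
 * polling state machine, such as ptlrpc_check_set(), call the async form and
 * simply try again later when the unlink has not completed, while teardown
 * paths that must guarantee the reply MD is gone pass async = 0 and block:
 *
 *	if (!ptlrpc_unregister_reply(req, 1))
 *		return;			// still unlinking, poll again later
 *	// reply buffer fully disengaged, safe to reuse or free the request
 */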
2749
2750 static void ptlrpc_free_request(struct ptlrpc_request *req)
2751 {
2752         spin_lock(&req->rq_lock);
2753         req->rq_replay = 0;
2754         spin_unlock(&req->rq_lock);
2755
2756         if (req->rq_commit_cb)
2757                 req->rq_commit_cb(req);
2758         list_del_init(&req->rq_replay_list);
2759
2760         __ptlrpc_req_finished(req, 1);
2761 }
2762
2763 /**
2764  * Drop the request from the replay list of its import once it is committed (or if \a force is set).
2765  */
2766 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2767 {
2768         struct obd_import *imp = req->rq_import;
2769
2770         spin_lock(&imp->imp_lock);
2771         if (list_empty(&req->rq_replay_list)) {
2772                 spin_unlock(&imp->imp_lock);
2773                 return;
2774         }
2775
2776         if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
2777                 if (imp->imp_replay_cursor == &req->rq_replay_list)
2778                         imp->imp_replay_cursor = req->rq_replay_list.next;
2779                 ptlrpc_free_request(req);
2780         }
2781
2782         spin_unlock(&imp->imp_lock);
2783 }
2784 EXPORT_SYMBOL(ptlrpc_request_committed);
2785
2786 /**
2787  * Iterates through the replay_list on the import and prunes
2788  * all requests that have a transno no larger than last_committed for the
2789  * import and do not have rq_replay set.
2790  * Since requests are sorted in transno order, it stops when meeting the
2791  * first transno bigger than last_committed.
2792  * The caller must hold imp->imp_lock.
2793  */
2794 void ptlrpc_free_committed(struct obd_import *imp)
2795 {
2796         struct ptlrpc_request *req, *saved;
2797         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
2798         bool skip_committed_list = true;
2799
2800         ENTRY;
2801         LASSERT(imp != NULL);
2802         assert_spin_locked(&imp->imp_lock);
2803
2804         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2805             imp->imp_generation == imp->imp_last_generation_checked) {
2806                 CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
2807                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2808                 RETURN_EXIT;
2809         }
2810         CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
2811                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2812                imp->imp_generation);
2813
2814         if (imp->imp_generation != imp->imp_last_generation_checked ||
2815             imp->imp_last_transno_checked == 0)
2816                 skip_committed_list = false;
2817
2818         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2819         imp->imp_last_generation_checked = imp->imp_generation;
2820
2821         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2822                                  rq_replay_list) {
2823                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2824                 LASSERT(req != last_req);
2825                 last_req = req;
2826
2827                 if (req->rq_transno == 0) {
2828                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2829                         LBUG();
2830                 }
2831                 if (req->rq_import_generation < imp->imp_generation) {
2832                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2833                         GOTO(free_req, 0);
2834                 }
2835
2836                 /* not yet committed */
2837                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2838                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2839                         break;
2840                 }
2841
2842                 if (req->rq_replay) {
2843                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2844                         list_move_tail(&req->rq_replay_list,
2845                                        &imp->imp_committed_list);
2846                         continue;
2847                 }
2848
2849                 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
2850                           imp->imp_peer_committed_transno);
2851 free_req:
2852                 ptlrpc_free_request(req);
2853         }
2854
2855         if (skip_committed_list)
2856                 GOTO(out, 0);
2857
2858         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2859                                  rq_replay_list) {
2860                 LASSERT(req->rq_transno != 0);
2861                 if (req->rq_import_generation < imp->imp_generation ||
2862                     !req->rq_replay) {
2863                         DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
2864                                   req->rq_import_generation <
2865                                   imp->imp_generation ? "stale" : "closed");
2866
2867                         if (imp->imp_replay_cursor == &req->rq_replay_list)
2868                                 imp->imp_replay_cursor =
2869                                         req->rq_replay_list.next;
2870
2871                         ptlrpc_free_request(req);
2872                 }
2873         }
2874 out:
2875         EXIT;
2876 }
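/*
 * Worked example (illustrative numbers only): with imp_peer_committed_transno
 * == 100 and a replay list holding transnos 90, 95, 100 and 105, the walk
 * above frees 90, 95 and 100 (moving any of them that still have rq_replay
 * set, e.g. an open, to imp_committed_list instead) and stops at 105, which
 * is not yet committed.
 */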
2877
2878 void ptlrpc_cleanup_client(struct obd_import *imp)
2879 {
2880         ENTRY;
2881         EXIT;
2882 }
2883
2884 /**
2885  * Schedule a previously sent request for resend.
2886  * For bulk requests we assign a new xid (to avoid problems with
2887  * lost replies and therefore several transfers landing in the same
2888  * buffer from different sending attempts).
2889  */
2890 void ptlrpc_resend_req(struct ptlrpc_request *req)
2891 {
2892         DEBUG_REQ(D_HA, req, "going to resend");
2893         spin_lock(&req->rq_lock);
2894
2895         /*
2896          * The request got a reply but is still linked to the import list.
2897          * Let ptlrpc_check_set() process it.
2898          */
2899         if (ptlrpc_client_replied(req)) {
2900                 spin_unlock(&req->rq_lock);
2901                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2902                 return;
2903         }
2904
2905         req->rq_status = -EAGAIN;
2906
2907         req->rq_resend = 1;
2908         req->rq_net_err = 0;
2909         req->rq_timedout = 0;
2910
2911         ptlrpc_client_wake_req(req);
2912         spin_unlock(&req->rq_lock);
2913 }
2914
2915 /* XXX: this function and rq_status are currently unused */
2916 void ptlrpc_restart_req(struct ptlrpc_request *req)
2917 {
2918         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2919         req->rq_status = -ERESTARTSYS;
2920
2921         spin_lock(&req->rq_lock);
2922         req->rq_restart = 1;
2923         req->rq_timedout = 0;
2924         ptlrpc_client_wake_req(req);
2925         spin_unlock(&req->rq_lock);
2926 }
2927
2928 /**
2929  * Grab an additional reference on request \a req.
2930  */
2931 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
2932 {
2933         ENTRY;
2934         atomic_inc(&req->rq_refcount);
2935         RETURN(req);
2936 }
2937 EXPORT_SYMBOL(ptlrpc_request_addref);
2938
2939 /**
2940  * Add a request to the import's replay_list.
2941  * Must be called under imp_lock.
2942  */
2943 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2944                                       struct obd_import *imp)
2945 {
2946         struct list_head *tmp;
2947
2948         assert_spin_locked(&imp->imp_lock);
2949
2950         if (req->rq_transno == 0) {
2951                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
2952                 LBUG();
2953         }
2954
2955         /*
2956          * clear this for new requests that were resent as well
2957          * as resent replayed requests.
2958          */
2959         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2960
2961         /* don't re-add requests that have been replayed */
2962         if (!list_empty(&req->rq_replay_list))
2963                 return;
2964
2965         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
2966
2967         spin_lock(&req->rq_lock);
2968         req->rq_resend = 0;
2969         spin_unlock(&req->rq_lock);
2970
2971         LASSERT(imp->imp_replayable);
2972         /* Balanced in ptlrpc_free_committed, usually. */
2973         ptlrpc_request_addref(req);
2974         list_for_each_prev(tmp, &imp->imp_replay_list) {
2975                 struct ptlrpc_request *iter = list_entry(tmp,
2976                                                          struct ptlrpc_request,
2977                                                          rq_replay_list);
2978
2979                 /*
2980                  * We may have duplicate transnos if we create and then
2981                  * open a file, or for closes retained to match creating
2982                  * opens, so use req->rq_xid as a secondary key.
2983                  * (See bugs 684, 685, and 428.)
2984                  * XXX no longer needed, but all opens need transnos!
2985                  */
2986                 if (iter->rq_transno > req->rq_transno)
2987                         continue;
2988
2989                 if (iter->rq_transno == req->rq_transno) {
2990                         LASSERT(iter->rq_xid != req->rq_xid);
2991                         if (iter->rq_xid > req->rq_xid)
2992                                 continue;
2993                 }
2994
2995                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
2996                 return;
2997         }
2998
2999         list_add(&req->rq_replay_list, &imp->imp_replay_list);
3000 }
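/*
 * Ordering sketch (illustrative values): if the replay list already holds
 * entries with (transno, xid) of (10, x5), (11, x8) and (12, x9), a new
 * request with transno 11 and xid x6 is linked between (10, x5) and (11, x8),
 * so replay later walks the list in (transno, xid) order.
 */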
3001
3002 /**
3003  * Send request and wait until it completes.
3004  * Returns request processing status.
3005  */
3006 int ptlrpc_queue_wait(struct ptlrpc_request *req)
3007 {
3008         struct ptlrpc_request_set *set;
3009         int rc;
3010
3011         ENTRY;
3012         LASSERT(req->rq_set == NULL);
3013         LASSERT(!req->rq_receiving_reply);
3014
3015         set = ptlrpc_prep_set();
3016         if (!set) {
3017                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
3018                 RETURN(-ENOMEM);
3019         }
3020
3021         /* for distributed debugging */
3022         lustre_msg_set_status(req->rq_reqmsg, current_pid());
3023
3024         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
3025         ptlrpc_request_addref(req);
3026         ptlrpc_set_add_req(set, req);
3027         rc = ptlrpc_set_wait(NULL, set);
3028         ptlrpc_set_destroy(set);
3029
3030         RETURN(rc);
3031 }
3032 EXPORT_SYMBOL(ptlrpc_queue_wait);
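/*
 * Hedged usage sketch (the request format and opcode below are placeholders,
 * not references to real definitions): a typical synchronous caller builds a
 * request, sends it with ptlrpc_queue_wait() and drops its own reference:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_EXAMPLE, LUSTRE_OBD_VERSION,
 *					EXAMPLE_OPCODE);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 */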
3033
3034 /**
3035  * Callback used for processing replies to replayed requests.
3036  * In case of a successful reply it calls the registered request replay callback.
3037  * In case of an error it restarts the replay process.
3038  */
3039 static int ptlrpc_replay_interpret(const struct lu_env *env,
3040                                    struct ptlrpc_request *req,
3041                                    void *args, int rc)
3042 {
3043         struct ptlrpc_replay_async_args *aa = args;
3044         struct obd_import *imp = req->rq_import;
3045
3046         ENTRY;
3047         atomic_dec(&imp->imp_replay_inflight);
3048
3049         /*
3050          * Note: if it is a bulk replay (MDS-MDS replay), then even if
3051          * the server got the request but the bulk transfer timed out,
3052          * let's replay the bulk request again.
3053          */
3054         if (!ptlrpc_client_replied(req) ||
3055             (req->rq_bulk &&
3056              lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
3057                 DEBUG_REQ(D_ERROR, req, "request replay timed out");
3058                 GOTO(out, rc = -ETIMEDOUT);
3059         }
3060
3061         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
3062             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
3063             lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
3064                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
3065
3066         /** VBR: check version failure */
3067         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
3068                 /** replay was failed due to version mismatch */
3069                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
3070                 spin_lock(&imp->imp_lock);
3071                 imp->imp_vbr_failed = 1;
3072                 spin_unlock(&imp->imp_lock);
3073                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3074         } else {
3075                 /** The transno had better not change over replay. */
3076                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
3077                          lustre_msg_get_transno(req->rq_repmsg) ||
3078                          lustre_msg_get_transno(req->rq_repmsg) == 0,
3079                          "%#llx/%#llx\n",
3080                          lustre_msg_get_transno(req->rq_reqmsg),
3081                          lustre_msg_get_transno(req->rq_repmsg));
3082         }
3083
3084         spin_lock(&imp->imp_lock);
3085         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
3086         spin_unlock(&imp->imp_lock);
3087         LASSERT(imp->imp_last_replay_transno);
3088
3089         /* transaction number shouldn't be bigger than the latest replayed */
3090         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
3091                 DEBUG_REQ(D_ERROR, req,
3092                           "Reported transno %llu is bigger than the replayed one: %llu",
3093                           req->rq_transno,
3094                           lustre_msg_get_transno(req->rq_reqmsg));
3095                 GOTO(out, rc = -EINVAL);
3096         }
3097
3098         DEBUG_REQ(D_HA, req, "got rep");
3099
3100         /* let the callback do fixups, possibly including in the request */
3101         if (req->rq_replay_cb)
3102                 req->rq_replay_cb(req);
3103
3104         if (ptlrpc_client_replied(req) &&
3105             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
3106                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
3107                           lustre_msg_get_status(req->rq_repmsg),
3108                           aa->praa_old_status);
3109
3110                 /*
3111                  * Note: If the replay fails for MDT-MDT recovery, let's
3112                  * abort all of the following requests in the replay
3113                  * and sending list, because MDT-MDT update requests
3114                  * are dependent on each other, see LU-7039
3115                  */
3116                 if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
3117                         struct ptlrpc_request *free_req;
3118                         struct ptlrpc_request *tmp;
3119
3120                         spin_lock(&imp->imp_lock);
3121                         list_for_each_entry_safe(free_req, tmp,
3122                                                  &imp->imp_replay_list,
3123                                                  rq_replay_list) {
3124                                 ptlrpc_free_request(free_req);
3125                         }
3126
3127                         list_for_each_entry_safe(free_req, tmp,
3128                                                  &imp->imp_committed_list,
3129                                                  rq_replay_list) {
3130                                 ptlrpc_free_request(free_req);
3131                         }
3132
3133                         list_for_each_entry_safe(free_req, tmp,
3134                                                  &imp->imp_delayed_list,
3135                                                  rq_list) {
3136                                 spin_lock(&free_req->rq_lock);
3137                                 free_req->rq_err = 1;
3138                                 free_req->rq_status = -EIO;
3139                                 ptlrpc_client_wake_req(free_req);
3140                                 spin_unlock(&free_req->rq_lock);
3141                         }
3142
3143                         list_for_each_entry_safe(free_req, tmp,
3144                                                  &imp->imp_sending_list,
3145                                                  rq_list) {
3146                                 spin_lock(&free_req->rq_lock);
3147                                 free_req->rq_err = 1;
3148                                 free_req->rq_status = -EIO;
3149                                 ptlrpc_client_wake_req(free_req);
3150                                 spin_unlock(&free_req->rq_lock);
3151                         }
3152                         spin_unlock(&imp->imp_lock);
3153                 }
3154         } else {
3155                 /* Put it back for re-replay. */
3156                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
3157         }
3158
3159         /*
3160          * Errors during replay can set the transno to 0, but
3161          * imp_last_replay_transno shouldn't be set to 0 anyway.
3162          */
3163         if (req->rq_transno == 0)
3164                 CERROR("Transno is 0 during replay!\n");
3165
3166         /* continue with recovery */
3167         rc = ptlrpc_import_recovery_state_machine(imp);
3168  out:
3169         req->rq_send_state = aa->praa_old_state;
3170
3171         if (rc != 0)
3172                 /* this replay failed, so restart recovery */
3173                 ptlrpc_connect_import(imp);
3174
3175         RETURN(rc);
3176 }
3177
3178 /**
3179  * Prepares and queues a request for replay.
3180  * Adds it to the ptlrpcd queue for actual sending.
3181  * Returns 0 on success.
3182  */
3183 int ptlrpc_replay_req(struct ptlrpc_request *req)
3184 {
3185         struct ptlrpc_replay_async_args *aa;
3186
3187         ENTRY;
3188
3189         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
3190
3191         aa = ptlrpc_req_async_args(aa, req);
3192         memset(aa, 0, sizeof(*aa));
3193
3194         /* Prepare request to be resent with ptlrpcd */
3195         aa->praa_old_state = req->rq_send_state;
3196         req->rq_send_state = LUSTRE_IMP_REPLAY;
3197         req->rq_phase = RQ_PHASE_NEW;
3198         req->rq_next_phase = RQ_PHASE_UNDEFINED;
3199         if (req->rq_repmsg)
3200                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
3201         req->rq_status = 0;
3202         req->rq_interpret_reply = ptlrpc_replay_interpret;
3203         /* Readjust the timeout for current conditions */
3204         ptlrpc_at_set_req_timeout(req);
3205
3206         /* Tell server net_latency to calculate how long to wait for reply. */
3207         lustre_msg_set_service_time(req->rq_reqmsg,
3208                                     ptlrpc_at_get_net_latency(req));
3209         DEBUG_REQ(D_HA, req, "REPLAY");
3210
3211         atomic_inc(&req->rq_import->imp_replay_inflight);
3212         spin_lock(&req->rq_lock);
3213         req->rq_early_free_repbuf = 0;
3214         spin_unlock(&req->rq_lock);
3215         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
3216
3217         ptlrpcd_add_req(req);
3218         RETURN(0);
3219 }
3220
3221 /**
3222  * Aborts all in-flight requests on the sending and delayed lists of import \a imp.
3223  */
3224 void ptlrpc_abort_inflight(struct obd_import *imp)
3225 {
3226         struct list_head *tmp, *n;
3227         ENTRY;
3228
3229         /*
3230          * Make sure that no new requests get processed for this import.
3231          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
3232          * this flag and then putting requests on sending_list or delayed_list.
3233          */
3234         assert_spin_locked(&imp->imp_lock);
3235
3236         /*
3237          * XXX locking?  Maybe we should remove each request with the list
3238          * locked?  Also, how do we know if the requests on the list are
3239          * being freed at this time?
3240          */
3241         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
3242                 struct ptlrpc_request *req = list_entry(tmp,
3243                                                         struct ptlrpc_request,
3244                                                         rq_list);
3245
3246                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
3247
3248                 spin_lock(&req->rq_lock);
3249                 if (req->rq_import_generation < imp->imp_generation) {
3250                         req->rq_err = 1;
3251                         req->rq_status = -EIO;
3252                         ptlrpc_client_wake_req(req);
3253                 }
3254                 spin_unlock(&req->rq_lock);
3255         }
3256
3257         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
3258                 struct ptlrpc_request *req =
3259                         list_entry(tmp, struct ptlrpc_request, rq_list);
3260
3261                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
3262
3263                 spin_lock(&req->rq_lock);
3264                 if (req->rq_import_generation < imp->imp_generation) {
3265                         req->rq_err = 1;
3266                         req->rq_status = -EIO;
3267                         ptlrpc_client_wake_req(req);
3268                 }
3269                 spin_unlock(&req->rq_lock);
3270         }
3271
3272         /*
3273          * Last chance to free reqs left on the replay list, but we
3274          * will still leak reqs that haven't committed.
3275          */
3276         if (imp->imp_replayable)
3277                 ptlrpc_free_committed(imp);
3278
3279         EXIT;
3280 }
3281
3282 /**
3283  * Abort all uncompleted requests in request set \a set
3284  */
3285 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
3286 {
3287         struct list_head *tmp, *pos;
3288
3289         LASSERT(set != NULL);
3290
3291         list_for_each_safe(pos, tmp, &set->set_requests) {
3292                 struct ptlrpc_request *req =
3293                         list_entry(pos, struct ptlrpc_request,
3294                                    rq_set_chain);
3295
3296                 spin_lock(&req->rq_lock);
3297                 if (req->rq_phase != RQ_PHASE_RPC) {
3298                         spin_unlock(&req->rq_lock);
3299                         continue;
3300                 }
3301
3302                 req->rq_err = 1;
3303                 req->rq_status = -EINTR;
3304                 ptlrpc_client_wake_req(req);
3305                 spin_unlock(&req->rq_lock);
3306         }
3307 }
3308
3309 /**
3310  * Initialize the XID for the node.  This is common among all requests on
3311  * this node, and only requires the property that it is monotonically
3312  * increasing.  It does not need to be sequential.  Since this is also used
3313  * as the RDMA match bits, it is important that a single client NOT have
3314  * the same match bits for two different in-flight requests, hence we do
3315  * NOT want to have an XID per target or similar.
3316  *
3317  * To avoid an unlikely collision between match bits after a client reboot
3318  * (which would deliver old data into the wrong RDMA buffer) initialize
3319  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
3320  * If the time is clearly incorrect, we instead use a 62-bit random number.
3321  * In the worst case the random number will overflow 1M RPCs per second in
3322  * 9133 years, or permutations thereof.
3323  */
3324 #define YEAR_2004 (1ULL << 30)
3325 void ptlrpc_init_xid(void)
3326 {
3327         time64_t now = ktime_get_real_seconds();
3328         u64 xid;
3329
3330         if (now < YEAR_2004) {
3331                 get_random_bytes(&xid, sizeof(xid));
3332                 xid >>= 2;
3333                 xid |= (1ULL << 61);
3334         } else {
3335                 xid = (u64)now << 20;
3336         }
3337
3338         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
3339         CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
3340         xid &= PTLRPC_BULK_OPS_MASK;
3341         atomic64_set(&ptlrpc_last_xid, xid);
3342 }
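/*
 * Back-of-the-envelope check (assumed figures, for illustration only): with
 * now ~= 1.7e9 seconds the time-based path is taken (now > YEAR_2004), and
 * "now << 20" budgets 2^20 (~1M) XIDs per second of real time.  A 64-bit XID
 * consumed at that rate only wraps after about 2^44 seconds (~500,000 years),
 * and masking with PTLRPC_BULK_OPS_MASK keeps the first multi-bulk XID range
 * aligned, which ptlrpc_next_xid() below relies on.
 */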
3343
3344 /**
3345  * Increases the xid and returns the resulting new value to the caller.
3346  *
3347  * Multi-bulk BRW RPCs consume multiple XIDs, one for each bulk transfer,
3348  * starting at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The
3349  * BRW RPC itself uses the last bulk xid needed, so the server can determine
3350  * the number of bulk transfers from the RPC XID and a bitmask.  The starting
3351  * xid must align to a power-of-two value.
3352  *
3353  * This is assumed to be true due to the initial ptlrpc_last_xid
3354  * value also being initialized to a power-of-two value. LU-1431
3355  */
3356 __u64 ptlrpc_next_xid(void)
3357 {
3358         return atomic64_add_return(PTLRPC_BULK_OPS_COUNT, &ptlrpc_last_xid);
3359 }
3360
3361 /**
3362  * If the request has a newly allocated XID (new request or EINPROGRESS
3363  * resend), use this XID as the bulk matchbits; otherwise allocate new
3364  * matchbits for the request to ensure the previous bulk fails and to avoid
3365  * problems with lost replies and therefore several transfers landing in
3366  * the same buffer from different sending attempts.
3367  */
3368 void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
3369 {
3370         struct ptlrpc_bulk_desc *bd = req->rq_bulk;
3371
3372         LASSERT(bd != NULL);
3373
3374         /*
3375          * Generate new matchbits for all resend requests, including
3376          * resend replay.
3377          */
3378         if (req->rq_resend) {
3379                 __u64 old_mbits = req->rq_mbits;
3380
3381                 /*
3382                  * The first resend on -EINPROGRESS will generate a new xid,
3383                  * so we could actually use rq_xid as rq_mbits in that case;
3384                  * however, it is a bit hard to distinguish such a resend from
3385                  * a 'resend of the -EINPROGRESS resend'. To keep it simple,
3386                  * we opt to generate new mbits for all resend cases.
3387                  */
3388                 if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data,
3389                                  BULK_MBITS)) {
3390                         req->rq_mbits = ptlrpc_next_xid();
3391                 } else {
3392                         /*
3393                          * Old version transfers rq_xid to peer as
3394                          * matchbits.
3395                          */
3396                         spin_lock(&req->rq_import->imp_lock);
3397                         list_del_init(&req->rq_unreplied_list);
3398                         ptlrpc_assign_next_xid_nolock(req);
3399                         spin_unlock(&req->rq_import->imp_lock);
3400                         req->rq_mbits = req->rq_xid;
3401                 }
3402                 CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
3403                        old_mbits, req->rq_mbits);
3404         } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
3405                 /* Request being sent first time, use xid as matchbits. */
3406                 if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)
3407                     || req->rq_mbits == 0) {
3408                         req->rq_mbits = req->rq_xid;
3409                 } else {
3410                         int total_md = (bd->bd_iov_count + LNET_MAX_IOV - 1) /
3411                                         LNET_MAX_IOV;
3412                         req->rq_mbits -= total_md - 1;
3413                 }
3414         } else {
3415                 /*
3416                  * Replay request, xid and matchbits have already been
3417                  * correctly assigned.
3418                  */
3419                 return;
3420         }
3421
3422         /*
3423          * For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
3424          * that server can infer the number of bulks that were prepared,
3425          * see LU-1431
3426          */
3427         req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
3428                           LNET_MAX_IOV) - 1;
3429
3430         /*
3431          * Set rq_xid as rq_mbits to indicate the final bulk for the old
3432          * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
3433          *
3434          * It's ok to directly set the rq_xid here, since this xid bump
3435          * won't affect the request position in unreplied list.
3436          */
3437         if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS))
3438                 req->rq_xid = req->rq_mbits;
3439 }
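/*
 * Illustrative numbers (assuming PTLRPC_BULK_OPS_COUNT == 16 and
 * LNET_MAX_IOV == 256, both only for the sake of the example): a first-time
 * BRW with bd_iov_count == 600 pages needs (600 + 255) / 256 == 3 LNet MDs,
 * so on a BULK_MBITS-capable server rq_mbits ends up as rq_xid + 2 and the
 * individual bulks use match bits rq_xid, rq_xid + 1 and rq_xid + 2.  Because
 * ptlrpc_next_xid() hands out xids in aligned blocks, the server can recover
 * the MD count from the low bits, e.g.
 * (rq_mbits & (PTLRPC_BULK_OPS_COUNT - 1)) + 1 in this example.
 */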
3440
3441 /**
3442  * Get a glimpse at what the next xid value might be.
3443  * Returns a possible next xid.
3444  */
3445 __u64 ptlrpc_sample_next_xid(void)
3446 {
3447         return atomic64_read(&ptlrpc_last_xid) + PTLRPC_BULK_OPS_COUNT;
3448 }
3449 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3450
3451 /**
3452  * Functions for operating ptlrpc workers.
3453  *
3454  * A ptlrpc work is a function which runs inside ptlrpcd context.
3455  * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
3456  *
3457  * 1. After a work item is created, it can be reused many times, that is:
3458  *         handler = ptlrpcd_alloc_work();
3459  *         ptlrpcd_queue_work();
3460  *
3461  *    queue it again when necessary:
3462  *         ptlrpcd_queue_work();
3463  *         ptlrpcd_destroy_work();
3464  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3465  *    but it will only be queued once at any given time. Also, as its name
3466  *    implies, there may be a delay before it actually runs in a ptlrpcd thread.
3467  */
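/*
 * Fuller usage sketch (my_cb and my_data are made-up names for illustration):
 *
 *	static int my_cb(const struct lu_env *env, void *data)
 *	{
 *		// runs in ptlrpcd context, must not sleep
 *		return 0;
 *	}
 *
 *	handler = ptlrpcd_alloc_work(imp, my_cb, my_data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *	ptlrpcd_queue_work(handler);	// re-queue as often as needed
 *	...
 *	ptlrpcd_destroy_work(handler);	// drops the caller's reference
 */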
3468 struct ptlrpc_work_async_args {
3469         int (*cb)(const struct lu_env *, void *);
3470         void *cbdata;
3471 };
3472
3473 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3474 {
3475         /* re-initialize the req */
3476         req->rq_timeout         = obd_timeout;
3477         req->rq_sent            = ktime_get_real_seconds();
3478         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3479         req->rq_phase           = RQ_PHASE_INTERPRET;
3480         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3481         req->rq_xid             = ptlrpc_next_xid();
3482         req->rq_import_generation = req->rq_import->imp_generation;
3483
3484         ptlrpcd_add_req(req);
3485 }
3486
3487 static int work_interpreter(const struct lu_env *env,
3488                             struct ptlrpc_request *req, void *args, int rc)
3489 {
3490         struct ptlrpc_work_async_args *arg = args;
3491
3492         LASSERT(ptlrpcd_check_work(req));
3493         LASSERT(arg->cb != NULL);
3494
3495         rc = arg->cb(env, arg->cbdata);
3496
3497         list_del_init(&req->rq_set_chain);
3498         req->rq_set = NULL;
3499
3500         if (atomic_dec_return(&req->rq_refcount) > 1) {
3501                 atomic_set(&req->rq_refcount, 2);
3502                 ptlrpcd_add_work_req(req);
3503         }
3504         return rc;
3505 }
3506
3507 static int worker_format;
3508
3509 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3510 {
3511         return req->rq_pill.rc_fmt == (void *)&worker_format;
3512 }
3513
3514 /**
3515  * Create a work for ptlrpc.
3516  */
3517 void *ptlrpcd_alloc_work(struct obd_import *imp,
3518                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3519 {
3520         struct ptlrpc_request *req = NULL;
3521         struct ptlrpc_work_async_args *args;
3522
3523         ENTRY;
3524         might_sleep();
3525
3526         if (!cb)
3527                 RETURN(ERR_PTR(-EINVAL));
3528
3529         /* copy some code from deprecated fakereq. */
3530         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3531         if (!req) {
3532                 CERROR("ptlrpc: run out of memory!\n");
3533                 RETURN(ERR_PTR(-ENOMEM));
3534         }
3535
3536         ptlrpc_cli_req_init(req);
3537
3538         req->rq_send_state = LUSTRE_IMP_FULL;
3539         req->rq_type = PTL_RPC_MSG_REQUEST;
3540         req->rq_import = class_import_get(imp);
3541         req->rq_interpret_reply = work_interpreter;
3542         /* don't want reply */
3543         req->rq_no_delay = req->rq_no_resend = 1;
3544         req->rq_pill.rc_fmt = (void *)&worker_format;
3545
3546         args = ptlrpc_req_async_args(args, req);
3547         args->cb     = cb;
3548         args->cbdata = cbdata;
3549
3550         RETURN(req);
3551 }
3552 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3553
3554 void ptlrpcd_destroy_work(void *handler)
3555 {
3556         struct ptlrpc_request *req = handler;
3557
3558         if (req)
3559                 ptlrpc_req_finished(req);
3560 }
3561 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3562
3563 int ptlrpcd_queue_work(void *handler)
3564 {
3565         struct ptlrpc_request *req = handler;
3566
3567         /*
3568          * Check if the req is already being queued.
3569          *
3570          * Here comes a trick: it lacks a way of checking if a req is being
3571          * processed reliably in ptlrpc. Here I have to use refcount of req
3572          * for this purpose. This is okay because the caller should use this
3573          * req as opaque data. - Jinshan
3574          */
3575         LASSERT(atomic_read(&req->rq_refcount) > 0);
3576         if (atomic_inc_return(&req->rq_refcount) == 2)
3577                 ptlrpcd_add_work_req(req);
3578         return 0;
3579 }
3580 EXPORT_SYMBOL(ptlrpcd_queue_work);