lustre/ptlrpc/client.c (fs/lustre-release.git)
LU-4629 ldlm: fix uninitialized variable
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

/** Implementation of client-side PortalRPC interfaces */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <errno.h>
#include <signal.h>
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <lustre_import.h>
#include <lustre_req_layout.h>

#include "ptlrpc_internal.h"

static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);

/**
 * Initialize passed in client structure \a cl.
 */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}
EXPORT_SYMBOL(ptlrpc_init_client);
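
/*
 * Example (illustrative sketch, not from this file): a client-side OBD
 * typically initializes its ptlrpc_client once at setup time. The portal
 * constants below exist in the Lustre headers; the obd variable and the
 * client name are hypothetical.
 *
 *      ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
 *                         "ost", &obd->obd_ldlm_client);
 */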

/**
 * Return PortalRPC connection for remote uuid \a uuid
 */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        lnet_nid_t                self;
        lnet_process_id_t         peer;
        int                       err;

        /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
         * before accessing its values. */
        /* coverity[uninit_use_in_call] */
        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CNETERR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_connection_get(peer, self, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}
EXPORT_SYMBOL(ptlrpc_uuid_to_connection);

/**
 * Allocate and initialize new bulk descriptor on the sender.
 * Returns pointer to the descriptor or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
                                         unsigned type, unsigned portal)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_portal = portal;
        desc->bd_type = type;
        desc->bd_md_count = 0;
        LASSERT(max_brw > 0);
        desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
        /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
         * node. Negotiated ocd_brw_size will always be <= this number. */
        for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
                LNetInvalidateHandle(&desc->bd_mds[i]);

        return desc;
}

/**
 * Prepare bulk descriptor for specified outgoing request \a req that
 * can fit \a npages pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              unsigned npages, unsigned max_brw,
                                              unsigned type, unsigned portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
        desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = client_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* This makes req own desc, and free it when req is freed */
        req->rq_bulk = desc;

        return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);

/**
 * Add a page \a page to the bulk descriptor \a desc.
 * Data to transfer in the page starts at offset \a pageoffset and
 * amount of data to transfer from the page is \a len
 */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                             struct page *page, int pageoffset, int len, int pin)
{
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);

        desc->bd_nob += len;

        if (pin)
                page_cache_get(page);

        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
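
/*
 * Example (illustrative sketch): a client preparing a bulk write would
 * attach a descriptor to an outgoing request and register each page of
 * the transfer. BULK_GET_SOURCE and OST_BULK_PORTAL are real constants;
 * the request, page array, and page count are hypothetical, and error
 * handling is abbreviated.
 *
 *      desc = ptlrpc_prep_bulk_imp(req, npages, 1,
 *                                  BULK_GET_SOURCE, OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < npages; i++)
 *              __ptlrpc_prep_bulk_page(desc, pages[i], 0,
 *                                      PAGE_CACHE_SIZE, 1);
 */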

/**
 * Uninitialize and free bulk descriptor \a desc.
 * Works on bulk descriptors both from server and client side.
 */
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
        int i;
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(desc->bd_md_count == 0);          /* network hands off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        sptlrpc_enc_pool_put_pages(desc);

        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        if (unpin) {
                for (i = 0; i < desc->bd_iov_count; i++)
                        page_cache_release(desc->bd_iov[i].kiov_page);
        }

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
                                bd_iov[desc->bd_max_iov]));
        EXIT;
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);

/**
 * Set server time limit for this req, i.e. how long are we willing to wait
 * for reply before timing out this request.
 */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
        __u32 serv_est;
        int idx;
        struct imp_at *at;

        LASSERT(req->rq_import);

        if (AT_OFF) {
                /* non-AT settings */
                /**
                 * \a imp_server_timeout means this is a reverse import and
                 * we send (currently only) ASTs to the client and cannot afford
                 * to wait too long for the reply, otherwise the other client
                 * (because of which we are sending this request) would
                 * timeout waiting for us
                 */
                req->rq_timeout = req->rq_import->imp_server_timeout ?
                                  obd_timeout / 2 : obd_timeout;
        } else {
                at = &req->rq_import->imp_at;
                idx = import_at_get_index(req->rq_import,
                                          req->rq_request_portal);
                serv_est = at_get(&at->iat_service_estimate[idx]);
                req->rq_timeout = at_est2timeout(serv_est);
        }
        /* We could get even fancier here, using history to predict increased
           loading... */

        /* Let the server know what this RPC timeout is by putting it in the
           reqmsg */
        lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
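
/*
 * Worked example (illustrative): with adaptive timeouts on, the comment in
 * after_reply() below notes that at_est2timeout() yields roughly "service
 * estimate x 125% + 5s". So a tracked service estimate of 20s gives
 * rq_timeout = 20 + 20/4 + 5 = 30s, while with AT off a regular import
 * simply uses obd_timeout (or obd_timeout / 2 for a reverse import).
 */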

/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
                                  unsigned int serv_est)
{
        int idx;
        unsigned int oldse;
        struct imp_at *at;

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        idx = import_at_get_index(req->rq_import, req->rq_request_portal);
        /* max service estimates are tracked on the server side,
           so just keep minimal history here */
        oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
        if (oldse != 0)
                CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name, req->rq_request_portal,
                       oldse, at_get(&at->iat_service_estimate[idx]));
}

/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
        return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}

/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                                      unsigned int service_time)
{
        unsigned int nl, oldnl;
        struct imp_at *at;
        time_t now = cfs_time_current_sec();

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        /* Network latency is total time less server processing time */
        nl = max_t(int, now - req->rq_sent - service_time, 0) + 1 /* st rounding */;
        if (service_time > now - req->rq_sent + 3 /* bz16408 */)
                CWARN("Reported service time %u > total measured time "
                      CFS_DURATION_T"\n", service_time,
                      cfs_time_sub(now, req->rq_sent));

        oldnl = at_measured(&at->iat_net_latency, nl);
        if (oldnl != 0)
                CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       obd_uuid2str(
                               &req->rq_import->imp_connection->c_remote_uuid),
                       oldnl, at_get(&at->iat_net_latency));
}
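
/*
 * Worked example (illustrative): if a request was sent at t = 100s, the
 * reply arrives at now = 110s, and the server reports 7s of service time,
 * then nl = max(110 - 100 - 7, 0) + 1 = 4s is fed into the running
 * network-latency estimate; the +1 compensates for the server rounding
 * its reported service time down to whole seconds.
 */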

static int unpack_reply(struct ptlrpc_request *req)
{
        int rc;

        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
                rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
                if (rc) {
                        DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
                        return(-EPROTO);
                }
        }

        rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
                return(-EPROTO);
        }
        return 0;
}

/**
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_request *early_req;
        time_t                 olddl;
        int                    rc;
        ENTRY;

        req->rq_early = 0;
        spin_unlock(&req->rq_lock);

        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        rc = unpack_reply(early_req);
        if (rc == 0) {
                /* Expecting to increase the service time estimate here */
                ptlrpc_at_adj_service(req,
                        lustre_msg_get_timeout(early_req->rq_repmsg));
                ptlrpc_at_adj_net_latency(req,
                        lustre_msg_get_service_time(early_req->rq_repmsg));
        }

        sptlrpc_cli_finish_early_reply(early_req);

        if (rc != 0) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        /* Adjust the local timeout for this req */
        ptlrpc_at_set_req_timeout(req);

        spin_lock(&req->rq_lock);
        olddl = req->rq_deadline;
        /* server assumes it now has rq_timeout from when the request
         * arrived, so the client should give it at least that long.
         * since we don't know the arrival time we'll use the original
         * sent time */
        req->rq_deadline = req->rq_sent + req->rq_timeout +
                           ptlrpc_at_get_net_latency(req);

        DEBUG_REQ(D_ADAPTTO, req,
                  "Early reply #%d, new deadline in "CFS_DURATION_T"s "
                  "("CFS_DURATION_T"s)", req->rq_early_count,
                  cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
                  cfs_time_sub(req->rq_deadline, olddl));

        RETURN(rc);
}

struct kmem_cache *request_cache;

int ptlrpc_request_cache_init(void)
{
        request_cache = kmem_cache_create("ptlrpc_cache",
                                          sizeof(struct ptlrpc_request),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
        return request_cache == NULL ? -ENOMEM : 0;
}

void ptlrpc_request_cache_fini(void)
{
        kmem_cache_destroy(request_cache);
}

struct ptlrpc_request *ptlrpc_request_cache_alloc(int flags)
{
        struct ptlrpc_request *req;

        OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
        return req;
}

void ptlrpc_request_cache_free(struct ptlrpc_request *req)
{
        OBD_SLAB_FREE_PTR(req, request_cache);
}
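
/*
 * Usage sketch (illustrative): the slab cache is created once when the
 * ptlrpc module loads and torn down on unload; all request allocations
 * in between come from it.
 *
 *      rc = ptlrpc_request_cache_init();        (at module init)
 *      ...
 *      req = ptlrpc_request_cache_alloc(GFP_NOFS);
 *      ptlrpc_request_cache_free(req);
 *      ...
 *      ptlrpc_request_cache_fini();             (at module exit)
 */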

/**
 * Wind down request pool \a pool.
 * Frees all requests from the pool too
 */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
        cfs_list_t *l, *tmp;
        struct ptlrpc_request *req;

        LASSERT(pool != NULL);

        spin_lock(&pool->prp_lock);
        cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
                req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
                cfs_list_del(&req->rq_list);
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
                OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
                ptlrpc_request_cache_free(req);
        }
        spin_unlock(&pool->prp_lock);
        OBD_FREE(pool, sizeof(*pool));
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);

/**
 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
 */
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
        int i;
        int size = 1;

        while (size < pool->prp_rq_size)
                size <<= 1;

        LASSERTF(cfs_list_empty(&pool->prp_req_list) ||
                 size == pool->prp_rq_size,
                 "Trying to change pool size with nonempty pool "
                 "from %d to %d bytes\n", pool->prp_rq_size, size);

        spin_lock(&pool->prp_lock);
        pool->prp_rq_size = size;
        for (i = 0; i < num_rq; i++) {
                struct ptlrpc_request *req;
                struct lustre_msg *msg;

                spin_unlock(&pool->prp_lock);
                req = ptlrpc_request_cache_alloc(GFP_NOFS);
                if (!req)
                        return;
                OBD_ALLOC_LARGE(msg, size);
                if (!msg) {
                        ptlrpc_request_cache_free(req);
                        return;
                }
                req->rq_reqbuf = msg;
                req->rq_reqbuf_len = size;
                req->rq_pool = pool;
                spin_lock(&pool->prp_lock);
                cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
        spin_unlock(&pool->prp_lock);
        return;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);

/**
 * Create and initialize new request pool with given attributes:
 * \a num_rq - initial number of requests to create for the pool
 * \a msgsize - maximum message size possible for requests in this pool
 * \a populate_pool - function to be called when more requests need to be added
 *                    to the pool
 * Returns pointer to newly created pool or NULL on error.
 */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
                    void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
        struct ptlrpc_request_pool *pool;

        OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
        if (!pool)
                return NULL;

        /* Request the next power of two for the allocation, because the
           kernel would internally round up to it anyway */

        spin_lock_init(&pool->prp_lock);
        CFS_INIT_LIST_HEAD(&pool->prp_req_list);
        pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
        pool->prp_populate = populate_pool;

        populate_pool(pool, num_rq);

        if (cfs_list_empty(&pool->prp_req_list)) {
                /* have not allocated a single request for the pool */
                OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
                pool = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
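
/*
 * Example (illustrative sketch): a client that must make progress under
 * memory pressure, e.g. in the writeout path, can reserve an emergency
 * pool up front and later draw requests from it via
 * ptlrpc_request_alloc_pool(). The pool and message sizes below are
 * hypothetical; ptlrpc_add_rqs_to_pool() above is the stock populate
 * callback.
 *
 *      pool = ptlrpc_init_rq_pool(4, OST_MAXREQSIZE,
 *                                 ptlrpc_add_rqs_to_pool);
 *      if (pool == NULL)
 *              return -ENOMEM;
 *      ...
 *      ptlrpc_free_rq_pool(pool);
 */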

/**
 * Fetches one request from pool \a pool
 */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        struct lustre_msg *reqbuf;

        if (!pool)
                return NULL;

        spin_lock(&pool->prp_lock);

        /* See if we have anything in a pool, and bail out if nothing.
         * In the writeout path, where this matters, this is safe to do
         * because nothing is lost in this case: when some in-flight
         * requests complete, this code will be called again. */
        if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
                spin_unlock(&pool->prp_lock);
                return NULL;
        }

        request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
                                 rq_list);
        cfs_list_del_init(&request->rq_list);
        spin_unlock(&pool->prp_lock);

        LASSERT(request->rq_reqbuf);
        LASSERT(request->rq_pool);

        reqbuf = request->rq_reqbuf;
        memset(request, 0, sizeof(*request));
        request->rq_reqbuf = reqbuf;
        request->rq_reqbuf_len = pool->prp_rq_size;
        request->rq_pool = pool;

        return request;
}

/**
 * Returns freed \a request to pool.
 */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
        struct ptlrpc_request_pool *pool = request->rq_pool;

        spin_lock(&pool->prp_lock);
        LASSERT(cfs_list_empty(&request->rq_list));
        LASSERT(!request->rq_receiving_reply);
        cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
        spin_unlock(&pool->prp_lock);
}

static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                                      __u32 version, int opcode,
                                      int count, __u32 *lengths, char **bufs,
                                      struct ptlrpc_cli_ctx *ctx)
{
        struct obd_import  *imp = request->rq_import;
        int                 rc;
        ENTRY;

        if (unlikely(ctx))
                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
        else {
                rc = sptlrpc_req_get_ctx(request);
                if (rc)
                        GOTO(out_free, rc);
        }

        sptlrpc_req_set_flavor(request, opcode);

        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
                                 lengths, bufs);
        if (rc) {
                LASSERT(!request->rq_pool);
                GOTO(out_ctx, rc);
        }

        lustre_msg_add_version(request->rq_reqmsg, version);
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_export = NULL;

        request->rq_req_cbid.cbid_fn  = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;

        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
        request->rq_reply_cbid.cbid_arg = request;

        request->rq_reply_deadline = 0;
        request->rq_phase = RQ_PHASE_NEW;
        request->rq_next_phase = RQ_PHASE_UNDEFINED;

        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        ptlrpc_at_set_req_timeout(request);

        spin_lock_init(&request->rq_lock);
        CFS_INIT_LIST_HEAD(&request->rq_list);
        CFS_INIT_LIST_HEAD(&request->rq_timed_list);
        CFS_INIT_LIST_HEAD(&request->rq_replay_list);
        CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
        CFS_INIT_LIST_HEAD(&request->rq_set_chain);
        CFS_INIT_LIST_HEAD(&request->rq_history_list);
        CFS_INIT_LIST_HEAD(&request->rq_exp_list);
        init_waitqueue_head(&request->rq_reply_waitq);
        init_waitqueue_head(&request->rq_set_waitq);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        lustre_msg_set_opc(request->rq_reqmsg, opcode);

        RETURN(0);
out_ctx:
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
        class_import_put(imp);
        return rc;
}

int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                             __u32 version, int opcode, char **bufs,
                             struct ptlrpc_cli_ctx *ctx)
{
        int count;

        count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
        return __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                          request->rq_pill.rc_area[RCL_CLIENT],
                                          bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);

/**
 * Pack request buffers for network transfer, performing any necessary
 * encryption steps.
 */
int ptlrpc_request_pack(struct ptlrpc_request *request,
                        __u32 version, int opcode)
{
        int rc;
        rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
        if (rc)
                return rc;

        /* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
         * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
         * have to send old ptlrpc_body to keep interoperability with these
         * clients.
         *
         * Only three kinds of server->client RPCs so far:
         *  - LDLM_BL_CALLBACK
         *  - LDLM_CP_CALLBACK
         *  - LDLM_GL_CALLBACK
         *
         * XXX This should be removed whenever we drop the interoperability
         *     with these old clients.
         */
        if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
            opcode == LDLM_GL_CALLBACK)
                req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
                                   sizeof(struct ptlrpc_body_v2), RCL_CLIENT);

        return rc;
}
EXPORT_SYMBOL(ptlrpc_request_pack);

/**
 * Helper function to allocate new request on import \a imp
 * and possibly using existing request from pool \a pool if provided.
 * Returns allocated request structure with import field filled or
 * NULL on error.
 */
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
                                              struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request = NULL;

        if (pool)
                request = ptlrpc_prep_req_from_pool(pool);

        if (!request)
                request = ptlrpc_request_cache_alloc(GFP_NOFS);

        if (request) {
                LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
                LASSERT(imp != LP_POISON);
                LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
                        imp->imp_client);
                LASSERT(imp->imp_client != LP_POISON);

                request->rq_import = class_import_get(imp);
        } else {
                CERROR("request allocation out of memory\n");
        }

        return request;
}

/**
 * Helper function for creating a request.
 * Calls __ptlrpc_request_alloc to allocate new request structure and inits
 * buffer structures according to capsule template \a format.
 * Returns allocated request structure pointer or NULL on error.
 */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
                              struct ptlrpc_request_pool *pool,
                              const struct req_format *format)
{
        struct ptlrpc_request *request;

        request = __ptlrpc_request_alloc(imp, pool);
        if (request == NULL)
                return NULL;

        req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
        req_capsule_set(&request->rq_pill, format);
        return request;
}

/**
 * Allocate new request structure for import \a imp and initialize its
 * buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, NULL, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc);
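
/*
 * Example (illustrative sketch): the usual two-step client pattern is to
 * allocate a request against a capsule template, optionally adjust buffer
 * sizes through the pill, and then pack it. RQF_MDS_GETATTR,
 * LUSTRE_MDS_VERSION and MDS_GETATTR are real identifiers; the import
 * variable is hypothetical.
 *
 *      req = ptlrpc_request_alloc(imp, &RQF_MDS_GETATTR);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
 *      if (rc) {
 *              ptlrpc_request_free(req);
 *              return rc;
 *      }
 */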

/**
 * Allocate new request structure for import \a imp from pool \a pool and
 * initialize its buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
                                            struct ptlrpc_request_pool *pool,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, pool, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pool);

/**
 * For requests not from pool, free memory of the request structure.
 * For requests obtained from a pool earlier, return request back to pool.
 */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
        if (request->rq_pool)
                __ptlrpc_free_req_to_pool(request);
        else
                ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);

/**
 * Allocate new request for operation \a opcode and immediately pack it for
 * network transfer.
 * Only used for simple requests like OBD_PING where the only important
 * part of the request is operation itself.
 * Returns allocated request or NULL on error.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
                                                const struct req_format *format,
                                                __u32 version, int opcode)
{
        struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
        int                    rc;

        if (req) {
                rc = ptlrpc_request_pack(req, version, opcode);
                if (rc) {
                        ptlrpc_request_free(req);
                        req = NULL;
                }
        }
        return req;
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
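
/*
 * Example (illustrative sketch): an OBD_PING is exactly this kind of
 * trivial request, so the pinger can allocate and pack it in one call and
 * then send it synchronously. RQF_OBD_PING, LUSTRE_OBD_VERSION and
 * OBD_PING are real identifiers; ptlrpc_queue_wait() and
 * ptlrpc_req_finished() are defined elsewhere in ptlrpc.
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                      LUSTRE_OBD_VERSION, OBD_PING);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */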

/**
 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
 * for operation \a opcode. Request would contain \a count buffers.
 * Sizes of buffers are described in array \a lengths and buffers themselves
 * are provided by a pointer \a bufs.
 * Returns prepared request structure pointer or NULL on error.
 */
struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
                     __u32 version, int opcode,
                     int count, __u32 *lengths, char **bufs,
                     struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        int                    rc;

        request = __ptlrpc_request_alloc(imp, pool);
        if (!request)
                return NULL;

        rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                        lengths, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(request);
                request = NULL;
        }
        return request;
}
EXPORT_SYMBOL(ptlrpc_prep_req_pool);

/**
 * Same as ptlrpc_prep_req_pool, but without pool
 */
struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
                __u32 *lengths, char **bufs)
{
        return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
                                    NULL);
}
EXPORT_SYMBOL(ptlrpc_prep_req);

/**
 * Allocate and initialize new request set structure.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;

        ENTRY;
        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        atomic_set(&set->set_refcount, 1);
        CFS_INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        atomic_set(&set->set_new_count, 0);
        atomic_set(&set->set_remaining, 0);
        spin_lock_init(&set->set_new_req_lock);
        CFS_INIT_LIST_HEAD(&set->set_new_requests);
        CFS_INIT_LIST_HEAD(&set->set_cblist);
        set->set_max_inflight = UINT_MAX;
        set->set_producer     = NULL;
        set->set_producer_arg = NULL;
        set->set_rc           = 0;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_set);
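
/*
 * Example (illustrative sketch): a caller that wants to issue several RPCs
 * in parallel collects them in a set, waits for all of them, and then
 * destroys the set. ptlrpc_set_wait() is defined later in this file; the
 * reqs[] array and count are hypothetical.
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      for (i = 0; i < count; i++)
 *              ptlrpc_set_add_req(set, reqs[i]);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 */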

/**
 * Allocate and initialize new request set structure with flow control
 * extension. This extension makes it possible to control the number of
 * requests in flight for the whole set. A callback function to generate
 * requests must be provided and the request set will keep the number of
 * requests sent over the wire to @max_inflight.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
                                             void *arg)
{
        struct ptlrpc_request_set *set;

        set = ptlrpc_prep_set();
        if (!set)
                RETURN(NULL);

        set->set_max_inflight  = max;
        set->set_producer      = func;
        set->set_producer_arg  = arg;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
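
/*
 * Example (illustrative sketch): a producer callback hands the set one new
 * request per call and returns -ENOENT when it has nothing more to
 * produce, which is how ptlrpc_set_producer() below knows to stop. The
 * my_ctx structure and my_build_next_req() helper are hypothetical.
 *
 *      static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *      {
 *              struct my_ctx *ctx = arg;
 *              struct ptlrpc_request *req = my_build_next_req(ctx);
 *
 *              if (req == NULL)
 *                      return -ENOENT;   (no more RPCs to produce)
 *              ptlrpc_set_add_req(set, req);
 *              return 0;
 *      }
 *
 *      set = ptlrpc_prep_fcset(8, my_producer, ctx);
 */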

/**
 * Wind down and free request set structure previously allocated with
 * ptlrpc_prep_set.
 * Ensures that all requests on the set have completed and removes
 * all requests from the request list in a set.
 * If any unsent requests happen to be on the list, pretends that they got
 * an error in flight and calls their completion handler.
 */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        cfs_list_t       *tmp;
        cfs_list_t       *next;
        int               expected_phase;
        int               n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (atomic_read(&set->set_remaining) == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        cfs_list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(tmp, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERTF(atomic_read(&set->set_remaining) == 0 ||
                 atomic_read(&set->set_remaining) == n, "%d / %d\n",
                 atomic_read(&set->set_remaining), n);

        cfs_list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(tmp, struct ptlrpc_request,
                                       rq_set_chain);
                cfs_list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {
                        ptlrpc_req_interpret(NULL, req, -EBADR);
                        atomic_dec(&set->set_remaining);
                }

                spin_lock(&req->rq_lock);
                req->rq_set = NULL;
                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);

                ptlrpc_req_finished(req);
        }

        LASSERT(atomic_read(&set->set_remaining) == 0);

        ptlrpc_reqset_put(set);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);

/**
 * Add a callback function \a fn to the set.
 * This function would be called when all requests on this set are completed.
 * The function will be passed \a data argument.
 */
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
                      set_interpreter_func fn, void *data)
{
        struct ptlrpc_set_cbdata *cbdata;

        OBD_ALLOC_PTR(cbdata);
        if (cbdata == NULL)
                RETURN(-ENOMEM);

        cbdata->psc_interpret = fn;
        cbdata->psc_data = data;
        cfs_list_add_tail(&cbdata->psc_item, &set->set_cblist);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_set_add_cb);
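
/*
 * Example (illustrative sketch): a completion callback registered this way
 * receives the set, the \a data pointer passed above, and the overall
 * status once every request in the set has completed. The callback name
 * and body are hypothetical, and the (set, data, rc) signature is an
 * assumption about set_interpreter_func.
 *
 *      static int my_set_done(struct ptlrpc_request_set *set,
 *                             void *data, int rc)
 *      {
 *              CDEBUG(D_INFO, "set %p finished: rc = %d\n", set, rc);
 *              return rc;
 *      }
 *
 *      rc = ptlrpc_set_add_cb(set, my_set_done, my_data);
 */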

/**
 * Add a new request to the general purpose request set.
 * Assumes request reference from the caller.
 */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        LASSERT(cfs_list_empty(&req->rq_set_chain));

        /* The set takes over the caller's request reference */
        cfs_list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        atomic_inc(&set->set_remaining);
        req->rq_queued_time = cfs_time_current();

        if (req->rq_reqmsg != NULL)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        if (set->set_producer != NULL)
                /* If the request set has a producer callback, the RPC must be
                 * sent straight away */
                ptlrpc_send_new_req(req);
}
EXPORT_SYMBOL(ptlrpc_set_add_req);

/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread to make any necessary processing.
 * Currently only used for ptlrpcd.
 */
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
                            struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        int count, i;

        LASSERT(req->rq_set == NULL);
        LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);

        spin_lock(&set->set_new_req_lock);
        /*
         * The set takes over the caller's request reference.
         */
        req->rq_set = set;
        req->rq_queued_time = cfs_time_current();
        cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
        count = atomic_inc_return(&set->set_new_count);
        spin_unlock(&set->set_new_req_lock);

        /* Only need to call wakeup once for the first entry. */
        if (count == 1) {
                wake_up(&set->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners. But
                 *      to guarantee the async RPC can be processed ASAP, we
                 *      have no other better choice. It may be fixed in the
                 *      future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);

/**
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request cannot be sent and *status is the
 * error code. If false and status is 0, then the request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
                                   struct ptlrpc_request *req, int *status)
{
        int delay = 0;
        ENTRY;

        LASSERT(status != NULL);
        *status = 0;

        if (req->rq_ctx_init || req->rq_ctx_fini) {
                /* always allow ctx init/fini rpc go through */
        } else if (imp->imp_state == LUSTRE_IMP_NEW) {
                DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
                *status = -EIO;
        } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
                /* pings may safely race with umount */
                DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
                          D_HA : D_ERROR, req, "IMP_CLOSED ");
                *status = -EIO;
        } else if (ptlrpc_send_limit_expired(req)) {
                /* probably doesn't need to be a D_ERROR after initial testing */
                DEBUG_REQ(D_ERROR, req, "send limit expired ");
                *status = -EIO;
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                   imp->imp_state == LUSTRE_IMP_CONNECTING) {
                /* allow CONNECT even if import is invalid */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                }
        } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
                if (!imp->imp_deactive)
                        DEBUG_REQ(D_NET, req, "IMP_INVALID");
                *status = -ESHUTDOWN; /* bz 12940 */
        } else if (req->rq_import_generation != imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req wrong generation:");
                *status = -EIO;
        } else if (req->rq_send_state != imp->imp_state) {
                /* invalidate in progress - any requests should be dropped */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                } else if (imp->imp_dlm_fake || req->rq_no_delay) {
                        *status = -EWOULDBLOCK;
                } else if (req->rq_allow_replay &&
                          (imp->imp_state == LUSTRE_IMP_REPLAY ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
                           imp->imp_state == LUSTRE_IMP_RECOVER)) {
                        DEBUG_REQ(D_HA, req, "allow during recovery.\n");
                } else {
                        delay = 1;
                }
        }

        RETURN(delay);
}

/**
 * Decide if the error message regarding provided request \a req
 * should be printed to the console or not.
 * Makes its decision based on request status and other properties.
 * Returns 1 to print error on the system console or 0 if not.
 */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
        __u32 opc;
        int err;

        LASSERT(req->rq_reqmsg != NULL);
        opc = lustre_msg_get_opc(req->rq_reqmsg);

        /* Suppress particular reconnect errors which are to be expected.  No
         * errors are suppressed for the initial connection on an import */
        if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
            (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {

                /* Suppress timed out reconnect requests */
                if (req->rq_timedout)
                        return 0;

                /* Suppress unavailable/again reconnect requests */
                err = lustre_msg_get_status(req->rq_repmsg);
                if (err == -ENODEV || err == -EAGAIN)
                        return 0;
        }

        return 1;
}

/**
 * Check request processing status.
 * Returns the status.
 */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = lustre_msg_get_status(req->rq_repmsg);
        if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
                struct obd_import *imp = req->rq_import;
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
                if (ptlrpc_console_allow(req))
                        LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
                                           " operation %s failed with %d.\n",
                                           imp->imp_obd->obd_name,
                                           libcfs_nid2str(
                                           imp->imp_connection->c_peer.nid),
                                           ll_opcode2str(opc), err);
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

/**
 * Save pre-versions of objects into request for replay.
 * Versions are obtained from server reply.
 * Used for VBR.
 */
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
        struct lustre_msg *repmsg = req->rq_repmsg;
        struct lustre_msg *reqmsg = req->rq_reqmsg;
        __u64 *versions = lustre_msg_get_versions(repmsg);
        ENTRY;

        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                return;

        LASSERT(versions);
        lustre_msg_set_versions(reqmsg, versions);
        CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
               versions[0], versions[1]);

        EXIT;
}

/**
 * Callback function called when client receives RPC reply for \a req.
 * Returns 0 on success or error code.
 * The return value would be assigned to req->rq_status by the caller
 * as request processing status.
 * This function also decides if the request needs to be saved for later replay.
 */
static int after_reply(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = req->rq_import->imp_obd;
        int rc;
        struct timeval work_start;
        long timediff;
        ENTRY;

        LASSERT(obd != NULL);
        /* repbuf must be unlinked */
        LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);

        if (req->rq_reply_truncate) {
                if (ptlrpc_no_resend(req)) {
                        DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
                                  " expected: %d, actual size: %d",
                                  req->rq_nob_received, req->rq_repbuf_len);
                        RETURN(-EOVERFLOW);
                }

                sptlrpc_cli_free_repbuf(req);
                /* Pass the required reply buffer size (include
                 * space for early reply).
                 * NB: no need to roundup because alloc_repbuf
                 * will roundup it */
                req->rq_replen       = req->rq_nob_received;
                req->rq_nob_received = 0;
                spin_lock(&req->rq_lock);
                req->rq_resend       = 1;
                spin_unlock(&req->rq_lock);
                RETURN(0);
        }

        /*
         * NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order.
         */
        rc = sptlrpc_cli_unwrap_reply(req);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
                RETURN(rc);
        }

        /*
         * Security layer unwrap might ask to resend this request.
         */
        if (req->rq_resend)
                RETURN(0);

        rc = unpack_reply(req);
        if (rc)
                RETURN(rc);

        /* retry indefinitely on EINPROGRESS */
        if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
            ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
                time_t  now = cfs_time_current_sec();

                DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
                req->rq_resend = 1;
                req->rq_nr_resend++;

                /* allocate new xid to avoid reply reconstruction */
                if (!req->rq_bulk) {
                        /* new xid is already allocated for bulk in
                         * ptlrpc_check_set() */
                        req->rq_xid = ptlrpc_next_xid();
                        DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
                                  "resend on EINPROGRESS");
                }

                /* Readjust the timeout for current conditions */
                ptlrpc_at_set_req_timeout(req);
                /* delay resend to give a chance to the server to get ready.
                 * The delay is increased by 1s on every resend and is capped to
                 * the current request timeout (i.e. obd_timeout if AT is off,
                 * or AT service time x 125% + 5s, see at_est2timeout) */
                if (req->rq_nr_resend > req->rq_timeout)
                        req->rq_sent = now + req->rq_timeout;
                else
                        req->rq_sent = now + req->rq_nr_resend;

                RETURN(0);
        }

        do_gettimeofday(&work_start);
        timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
        if (obd->obd_svc_stats != NULL) {
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                ptlrpc_lprocfs_rpc_sent(req, timediff);
        }

        if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
            lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
                          lustre_msg_get_type(req->rq_repmsg));
                RETURN(-EPROTO);
        }

        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
        ptlrpc_at_adj_net_latency(req,
                                  lustre_msg_get_service_time(req->rq_repmsg));

        rc = ptlrpc_check_status(req);
        imp->imp_connect_error = rc;

        if (rc) {
                /*
                 * Either we've been evicted, or the server has failed for
                 * some reason. Try to reconnect, and if that fails, punt to
                 * the upcall.
                 */
                if (ll_rpc_recoverable_error(rc)) {
                        if (req->rq_send_state != LUSTRE_IMP_FULL ||
                            imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
                                RETURN(rc);
                        }
                        ptlrpc_request_handle_notconn(req);
                        RETURN(rc);
                }
        } else {
                /*
                 * Check if the server sent an SLV (server lock volume)
                 * update. Do it only for RPCs with rc == 0.
                 */
                ldlm_cli_update_pool(req);
        }

        /*
         * Store transno in reqmsg for replay.
         */
        if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
                req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
                lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
        }

        if (imp->imp_replayable) {
                spin_lock(&imp->imp_lock);
                /*
                 * No point in adding already-committed requests to the replay
                 * list, we will just remove them immediately. b=9829
                 */
                if (req->rq_transno != 0 &&
                    (req->rq_transno >
                     lustre_msg_get_last_committed(req->rq_repmsg) ||
                     req->rq_replay)) {
                        /** version recovery */
                        ptlrpc_save_versions(req);
                        ptlrpc_retain_replayable_request(req, imp);
                } else if (req->rq_commit_cb != NULL &&
                           list_empty(&req->rq_replay_list)) {
                        /* NB: don't call rq_commit_cb if it's already on
                         * rq_replay_list, ptlrpc_free_committed() will call
                         * it later, see LU-3618 for details */
                        spin_unlock(&imp->imp_lock);
                        req->rq_commit_cb(req);
                        spin_lock(&imp->imp_lock);
                }

                /*
                 * Replay-enabled imports return commit-status information.
                 */
                if (lustre_msg_get_last_committed(req->rq_repmsg)) {
                        imp->imp_peer_committed_transno =
                                lustre_msg_get_last_committed(req->rq_repmsg);
                }

                ptlrpc_free_committed(imp);

                if (!cfs_list_empty(&imp->imp_replay_list)) {
                        struct ptlrpc_request *last;

                        last = cfs_list_entry(imp->imp_replay_list.prev,
                                              struct ptlrpc_request,
                                              rq_replay_list);
                        /*
                         * Requests with rq_replay stay on the list even if no
                         * commit is expected.
                         */
                        if (last->rq_transno > imp->imp_peer_committed_transno)
                                ptlrpc_pinger_commit_expected(imp);
                }

                spin_unlock(&imp->imp_lock);
        }

        RETURN(rc);
}
1407
1408 /**
1409  * Helper function to send request \a req over the network for the first time
1410  * Also adjusts request phase.
1411  * Returns 0 on success or error code.
1412  */
1413 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1414 {
1415         struct obd_import     *imp = req->rq_import;
1416         int rc;
1417         ENTRY;
1418
1419         LASSERT(req->rq_phase == RQ_PHASE_NEW);
1420         if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
1421             (!req->rq_generation_set ||
1422              req->rq_import_generation == imp->imp_generation))
1423                 RETURN (0);
1424
1425         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1426
1427         spin_lock(&imp->imp_lock);
1428
1429         if (!req->rq_generation_set)
1430                 req->rq_import_generation = imp->imp_generation;
1431
1432         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1433                 spin_lock(&req->rq_lock);
1434                 req->rq_waiting = 1;
1435                 spin_unlock(&req->rq_lock);
1436
1437                 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
1438                           "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
1439                           ptlrpc_import_state_name(req->rq_send_state),
1440                           ptlrpc_import_state_name(imp->imp_state));
1441                 LASSERT(cfs_list_empty(&req->rq_list));
1442                 cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1443                 atomic_inc(&req->rq_import->imp_inflight);
1444                 spin_unlock(&imp->imp_lock);
1445                 RETURN(0);
1446         }
1447
1448         if (rc != 0) {
1449                 spin_unlock(&imp->imp_lock);
1450                 req->rq_status = rc;
1451                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1452                 RETURN(rc);
1453         }
1454
1455         LASSERT(cfs_list_empty(&req->rq_list));
1456         cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
1457         atomic_inc(&req->rq_import->imp_inflight);
1458         spin_unlock(&imp->imp_lock);
1459
1460         lustre_msg_set_status(req->rq_reqmsg, current_pid());
1461
1462         rc = sptlrpc_req_refresh_ctx(req, -1);
1463         if (rc) {
1464                 if (req->rq_err) {
1465                         req->rq_status = rc;
1466                         RETURN(1);
1467                 } else {
1468                         spin_lock(&req->rq_lock);
1469                         req->rq_wait_ctx = 1;
1470                         spin_unlock(&req->rq_lock);
1471                         RETURN(0);
1472                 }
1473         }
1474
1475         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
1476                " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
1477                imp->imp_obd->obd_uuid.uuid,
1478                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1479                libcfs_nid2str(imp->imp_connection->c_peer.nid),
1480                lustre_msg_get_opc(req->rq_reqmsg));
1481
1482         rc = ptl_send_rpc(req, 0);
1483         if (rc) {
1484                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
1485                 spin_lock(&req->rq_lock);
1486                 req->rq_net_err = 1;
1487                 spin_unlock(&req->rq_lock);
1488                 RETURN(rc);
1489         }
1490         RETURN(0);
1491 }
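
/*
 * A request handed to ptlrpc_send_new_req() above is then driven through
 * its remaining phases by ptlrpc_check_set() below.  As a reading aid
 * (a summary inferred from the checks in this file, not an authoritative
 * state diagram), the usual progression is:
 *
 *   RQ_PHASE_NEW -> RQ_PHASE_RPC -> [RQ_PHASE_BULK] ->
 *   RQ_PHASE_INTERPRET -> RQ_PHASE_COMPLETE
 *
 * with RQ_PHASE_UNREGISTERING entered from the RPC or BULK phases
 * whenever we must wait for LNet to confirm that reply or bulk buffers
 * have been unlinked.
 */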
1492
1493 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1494 {
1495         int remaining, rc;
1496         ENTRY;
1497
1498         LASSERT(set->set_producer != NULL);
1499
1500         remaining = atomic_read(&set->set_remaining);
1501
1502         /* populate the ->set_requests list with requests until we
1503          * reach the maximum number of RPCs in flight for this set */
1504         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1505                 rc = set->set_producer(set, set->set_producer_arg);
1506                 if (rc == -ENOENT) {
1507                         /* no more RPC to produce */
1508                         set->set_producer     = NULL;
1509                         set->set_producer_arg = NULL;
1510                         RETURN(0);
1511                 }
1512         }
1513
1514         RETURN((atomic_read(&set->set_remaining) - remaining));
1515 }
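
/*
 * A minimal sketch of a producer callback as consumed by
 * ptlrpc_set_producer() above (my_produce_one, my_ctx and
 * my_build_next_req are hypothetical names, not part of this file; only
 * the -ENOENT convention comes from the code above).  Note that
 * ptlrpc_set_add_req() increments set_remaining, which is what lets the
 * while loop above terminate:
 *
 *   static int my_produce_one(struct ptlrpc_request_set *set, void *arg)
 *   {
 *           struct my_ctx         *ctx = arg;
 *           struct ptlrpc_request *req;
 *
 *           if (ctx->remaining == 0)
 *                   return -ENOENT;          // nothing left to produce
 *
 *           req = my_build_next_req(ctx);    // hypothetical constructor
 *           if (req == NULL)
 *                   return -ENOMEM;
 *
 *           ptlrpc_set_add_req(set, req);    // bumps set_remaining
 *           ctx->remaining--;
 *           return 0;
 *   }
 */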
1516
1517 /**
1518  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1519  * and no more replies are expected.
1520  * (It is possible to get fewer replies than requests sent, e.g. due to
1521  * timed-out requests or requests that we had trouble sending out.)
1522  *
1523  * NOTE: This function contains a potential schedule point (cond_resched()).
1524  */
1525 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1526 {
1527         cfs_list_t *tmp, *next;
1528         int force_timer_recalc = 0;
1529         ENTRY;
1530
1531         if (atomic_read(&set->set_remaining) == 0)
1532                 RETURN(1);
1533
1534         cfs_list_for_each_safe(tmp, next, &set->set_requests) {
1535                 struct ptlrpc_request *req =
1536                         cfs_list_entry(tmp, struct ptlrpc_request,
1537                                        rq_set_chain);
1538                 struct obd_import *imp = req->rq_import;
1539                 int unregistered = 0;
1540                 int rc = 0;
1541
1542                 /* This schedule point is mainly for the ptlrpcd caller of this
1543                  * function.  Most ptlrpc sets are neither long-lived nor
1544                  * unbounded in length, but the set used by the ptlrpcd is both.
1545                  * Since the processing time is unbounded, we need to insert an
1546                  * explicit schedule point to make the thread well-behaved.
1547                  */
1548                 cond_resched();
1549
1550                 if (req->rq_phase == RQ_PHASE_NEW &&
1551                     ptlrpc_send_new_req(req)) {
1552                         force_timer_recalc = 1;
1553                 }
1554
1555                 /* delayed send - skip */
1556                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1557                         continue;
1558
1559                 /* delayed resend - skip */
1560                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1561                     req->rq_sent > cfs_time_current_sec())
1562                         continue;
1563
1564                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1565                       req->rq_phase == RQ_PHASE_BULK ||
1566                       req->rq_phase == RQ_PHASE_INTERPRET ||
1567                       req->rq_phase == RQ_PHASE_UNREGISTERING ||
1568                       req->rq_phase == RQ_PHASE_COMPLETE)) {
1569                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1570                         LBUG();
1571                 }
1572
1573                 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1574                         LASSERT(req->rq_next_phase != req->rq_phase);
1575                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1576
1577                         /*
1578                          * Skip processing until reply is unlinked. We
1579                          * can't return to pool before that and we can't
1580                          * call interpret before that. We need to make
1581                          * sure that all rdma transfers finished and will
1582                          * not corrupt any data.
1583                          */
1584                         if (ptlrpc_client_recv_or_unlink(req) ||
1585                             ptlrpc_client_bulk_active(req))
1586                                 continue;
1587
1588                         /*
1589                          * Turn fail_loc off to prevent it from looping
1590                          * forever.
1591                          */
1592                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1593                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1594                                                      OBD_FAIL_ONCE);
1595                         }
1596                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1597                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1598                                                      OBD_FAIL_ONCE);
1599                         }
1600
1601                         /*
1602                          * Move to next phase if reply was successfully
1603                          * unlinked.
1604                          */
1605                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1606                 }
1607
1608                 if (req->rq_phase == RQ_PHASE_COMPLETE)
1609                         continue;
1610
1611                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1612                         GOTO(interpret, req->rq_status);
1613
1614                 /*
1615                  * Note that this will also start async reply unlink.
1616                  */
1617                 if (req->rq_net_err && !req->rq_timedout) {
1618                         ptlrpc_expire_one_request(req, 1);
1619
1620                         /*
1621                          * Check if we still need to wait for unlink.
1622                          */
1623                         if (ptlrpc_client_recv_or_unlink(req) ||
1624                             ptlrpc_client_bulk_active(req))
1625                                 continue;
1626                         /* If there is no need to resend, fail it now. */
1627                         if (req->rq_no_resend) {
1628                                 if (req->rq_status == 0)
1629                                         req->rq_status = -EIO;
1630                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1631                                 GOTO(interpret, req->rq_status);
1632                         } else {
1633                                 continue;
1634                         }
1635                 }
1636
1637                 if (req->rq_err) {
1638                         spin_lock(&req->rq_lock);
1639                         req->rq_replied = 0;
1640                         spin_unlock(&req->rq_lock);
1641                         if (req->rq_status == 0)
1642                                 req->rq_status = -EIO;
1643                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1644                         GOTO(interpret, req->rq_status);
1645                 }
1646
1647                 /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr,
1648                  * so the synchronous IO waiting path sets rq_intr
1649                  * regardless of individual rpc timeouts, i.e.
1650                  * irrespective of whether ptlrpcd has seen a timeout.
1651                  * Our policy is to only interpret interrupted rpcs
1652                  * after they have timed out, so we need to enforce
1653                  * that here.
1654                  */
1655
1656                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1657                                      req->rq_wait_ctx)) {
1658                         req->rq_status = -EINTR;
1659                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1660                         GOTO(interpret, req->rq_status);
1661                 }
1662
1663                 if (req->rq_phase == RQ_PHASE_RPC) {
1664                         if (req->rq_timedout || req->rq_resend ||
1665                             req->rq_waiting || req->rq_wait_ctx) {
1666                                 int status;
1667
1668                                 if (!ptlrpc_unregister_reply(req, 1))
1669                                         continue;
1670
1671                                 spin_lock(&imp->imp_lock);
1672                                 if (ptlrpc_import_delay_req(imp, req, &status)){
1673                                         /* put on the delayed list, to be
1674                                          * sent once recovery has finished */
1675                                         cfs_list_del_init(&req->rq_list);
1676                                         cfs_list_add_tail(&req->rq_list,
1677                                                           &imp->
1678                                                           imp_delayed_list);
1679                                         spin_unlock(&imp->imp_lock);
1680                                         continue;
1681                                 }
1682
1683                                 if (status != 0)  {
1684                                         req->rq_status = status;
1685                                         ptlrpc_rqphase_move(req,
1686                                                 RQ_PHASE_INTERPRET);
1687                                         spin_unlock(&imp->imp_lock);
1688                                         GOTO(interpret, req->rq_status);
1689                                 }
1690                                 if (ptlrpc_no_resend(req) &&
1691                                     !req->rq_wait_ctx) {
1692                                         req->rq_status = -ENOTCONN;
1693                                         ptlrpc_rqphase_move(req,
1694                                                             RQ_PHASE_INTERPRET);
1695                                         spin_unlock(&imp->imp_lock);
1696                                         GOTO(interpret, req->rq_status);
1697                                 }
1698
1699                                 cfs_list_del_init(&req->rq_list);
1700                                 cfs_list_add_tail(&req->rq_list,
1701                                                   &imp->imp_sending_list);
1702
1703                                 spin_unlock(&imp->imp_lock);
1704
1705                                 spin_lock(&req->rq_lock);
1706                                 req->rq_waiting = 0;
1707                                 spin_unlock(&req->rq_lock);
1708
1709                                 if (req->rq_timedout || req->rq_resend) {
1710                                         /* This is re-sending anyway,
1711                                          * so mark the req as resend. */
1712                                         spin_lock(&req->rq_lock);
1713                                         req->rq_resend = 1;
1714                                         spin_unlock(&req->rq_lock);
1715                                         if (req->rq_bulk) {
1716                                                 __u64 old_xid;
1717
1718                                                 if (!ptlrpc_unregister_bulk(req, 1))
1719                                                         continue;
1720
1721                                                 /* ensure previous bulk fails */
1722                                                 old_xid = req->rq_xid;
1723                                                 req->rq_xid = ptlrpc_next_xid();
1724                                                 CDEBUG(D_HA, "resend bulk "
1725                                                        "old x"LPU64
1726                                                        " new x"LPU64"\n",
1727                                                        old_xid, req->rq_xid);
1728                                         }
1729                                 }
1730                                 /*
1731                                  * rq_wait_ctx is only touched by ptlrpcd,
1732                                  * so no lock is needed here.
1733                                  */
1734                                 status = sptlrpc_req_refresh_ctx(req, -1);
1735                                 if (status) {
1736                                         if (req->rq_err) {
1737                                                 req->rq_status = status;
1738                                                 spin_lock(&req->rq_lock);
1739                                                 req->rq_wait_ctx = 0;
1740                                                 spin_unlock(&req->rq_lock);
1741                                                 force_timer_recalc = 1;
1742                                         } else {
1743                                                 spin_lock(&req->rq_lock);
1744                                                 req->rq_wait_ctx = 1;
1745                                                 spin_unlock(&req->rq_lock);
1746                                         }
1747
1748                                         continue;
1749                                 } else {
1750                                         spin_lock(&req->rq_lock);
1751                                         req->rq_wait_ctx = 0;
1752                                         spin_unlock(&req->rq_lock);
1753                                 }
1754
1755                                 rc = ptl_send_rpc(req, 0);
1756                                 if (rc) {
1757                                         DEBUG_REQ(D_HA, req,
1758                                                   "send failed: rc = %d", rc);
1759                                         force_timer_recalc = 1;
1760                                         spin_lock(&req->rq_lock);
1761                                         req->rq_net_err = 1;
1762                                         spin_unlock(&req->rq_lock);
1763                                         continue;
1764                                 }
1765                                 /* need to reset the timeout */
1766                                 force_timer_recalc = 1;
1767                         }
1768
1769                         spin_lock(&req->rq_lock);
1770
1771                         if (ptlrpc_client_early(req)) {
1772                                 ptlrpc_at_recv_early_reply(req);
1773                                 spin_unlock(&req->rq_lock);
1774                                 continue;
1775                         }
1776
1777                         /* Still waiting for a reply? */
1778                         if (ptlrpc_client_recv(req)) {
1779                                 spin_unlock(&req->rq_lock);
1780                                 continue;
1781                         }
1782
1783                         /* Did we actually receive a reply? */
1784                         if (!ptlrpc_client_replied(req)) {
1785                                 spin_unlock(&req->rq_lock);
1786                                 continue;
1787                         }
1788
1789                         spin_unlock(&req->rq_lock);
1790
1791                         /* unlink from net because we are going to
1792                          * swab the reply buffer in place */
1793                         unregistered = ptlrpc_unregister_reply(req, 1);
1794                         if (!unregistered)
1795                                 continue;
1796
1797                         req->rq_status = after_reply(req);
1798                         if (req->rq_resend)
1799                                 continue;
1800
1801                         /* If there is no bulk associated with this request,
1802                          * then we're done and should let the interpreter
1803                          * process the reply. Similarly if the RPC returned
1804                          * an error, and therefore the bulk will never arrive.
1805                          */
1806                         if (req->rq_bulk == NULL || req->rq_status < 0) {
1807                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1808                                 GOTO(interpret, req->rq_status);
1809                         }
1810
1811                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
1812                 }
1813
1814                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
1815                 if (ptlrpc_client_bulk_active(req))
1816                         continue;
1817
1818                 if (req->rq_bulk->bd_failure) {
1819                         /* The RPC reply arrived OK, but the bulk screwed
1820                          * up!  Dead weird, since the server told us the RPC
1821                          * was good after getting the REPLY for its GET or
1822                          * the ACK for its PUT. */
1823                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1824                         req->rq_status = -EIO;
1825                 }
1826
1827                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1828
1829         interpret:
1830                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
1831
1832                 /* This moves the request to the "unregistering" phase;
1833                  * we need to wait for the reply to be unlinked. */
1834                 if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
1835                         /* start async bulk unlink too */
1836                         ptlrpc_unregister_bulk(req, 1);
1837                         continue;
1838                 }
1839
1840                 if (!ptlrpc_unregister_bulk(req, 1))
1841                         continue;
1842
1843                 /* By the time we call interpret, receiving should
1844                  * already be finished. */
1845                 LASSERT(!req->rq_receiving_reply);
1846
1847                 ptlrpc_req_interpret(env, req, req->rq_status);
1848
1849                 if (ptlrpcd_check_work(req)) {
1850                         atomic_dec(&set->set_remaining);
1851                         continue;
1852                 }
1853                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
1854
1855                 CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
1856                         "Completed RPC pname:cluuid:pid:xid:nid:"
1857                         "opc %s:%s:%d:"LPU64":%s:%d\n",
1858                         current_comm(), imp->imp_obd->obd_uuid.uuid,
1859                         lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1860                         libcfs_nid2str(imp->imp_connection->c_peer.nid),
1861                         lustre_msg_get_opc(req->rq_reqmsg));
1862
1863                 spin_lock(&imp->imp_lock);
1864                 /* The request may no longer be on the sending or delayed
1865                  * list. This happens when it was marked erroneous because
1866                  * ptlrpc_import_delay_req(req, status) found it impossible
1867                  * to allow sending this rpc and returned *status != 0. */
1868                 if (!cfs_list_empty(&req->rq_list)) {
1869                         cfs_list_del_init(&req->rq_list);
1870                         atomic_dec(&imp->imp_inflight);
1871                 }
1872                 spin_unlock(&imp->imp_lock);
1873
1874                 atomic_dec(&set->set_remaining);
1875                 wake_up_all(&imp->imp_recovery_waitq);
1876
1877                 if (set->set_producer) {
1878                         /* produce a new request if possible */
1879                         if (ptlrpc_set_producer(set) > 0)
1880                                 force_timer_recalc = 1;
1881
1882                         /* free the request that has just been completed
1883                          * in order not to pollute set->set_requests */
1884                         cfs_list_del_init(&req->rq_set_chain);
1885                         spin_lock(&req->rq_lock);
1886                         req->rq_set = NULL;
1887                         req->rq_invalid_rqset = 0;
1888                         spin_unlock(&req->rq_lock);
1889
1890                         /* record rq_status to compute the final status later */
1891                         if (req->rq_status != 0)
1892                                 set->set_rc = req->rq_status;
1893                         ptlrpc_req_finished(req);
1894                 }
1895         }
1896
1897         /* If we hit an error, we want to recover promptly. */
1898         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
1899 }
1900 EXPORT_SYMBOL(ptlrpc_check_set);
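
/*
 * ptlrpc_check_set() is meant to be polled: callers (ptlrpc_set_wait()
 * below, or the ptlrpcd threads) invoke it repeatedly until it returns 1.
 * A bare-bones sketch of such a driver loop, with my_wait_a_bit() as a
 * hypothetical stand-in for the l_wait_event()/timeout machinery that
 * real callers use:
 *
 *   while (!ptlrpc_check_set(NULL, set))
 *           my_wait_a_bit(&set->set_waitq);
 *   // every request in the set is now in RQ_PHASE_COMPLETE
 */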
1901
1902 /**
1903  * Time out request \a req. If \a async_unlink is set, do not wait
1904  * until LNet actually confirms network buffer unlinking.
1905  * Return 1 if we should give up further retrying attempts or 0 otherwise.
1906  */
1907 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
1908 {
1909         struct obd_import *imp = req->rq_import;
1910         int rc = 0;
1911         ENTRY;
1912
1913         spin_lock(&req->rq_lock);
1914         req->rq_timedout = 1;
1915         spin_unlock(&req->rq_lock);
1916
1917         DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
1918                   "/real "CFS_DURATION_T"]",
1919                   req->rq_net_err ? "failed due to network error" :
1920                      ((req->rq_real_sent == 0 ||
1921                        cfs_time_before(req->rq_real_sent, req->rq_sent) ||
1922                        cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
1923                       "timed out for sent delay" : "timed out for slow reply"),
1924                   req->rq_sent, req->rq_real_sent);
1925
1926         if (imp != NULL && obd_debug_peer_on_timeout)
1927                 LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
1928
1929         ptlrpc_unregister_reply(req, async_unlink);
1930         ptlrpc_unregister_bulk(req, async_unlink);
1931
1932         if (obd_dump_on_timeout)
1933                 libcfs_debug_dumplog();
1934
1935         if (imp == NULL) {
1936                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
1937                 RETURN(1);
1938         }
1939
1940         atomic_inc(&imp->imp_timeouts);
1941
1942         /* The DLM server doesn't want recovery run on its imports. */
1943         if (imp->imp_dlm_fake)
1944                 RETURN(1);
1945
1946         /* If this request is for recovery or other primordial tasks,
1947          * then error it out here. */
1948         if (req->rq_ctx_init || req->rq_ctx_fini ||
1949             req->rq_send_state != LUSTRE_IMP_FULL ||
1950             imp->imp_obd->obd_no_recov) {
1951                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
1952                           ptlrpc_import_state_name(req->rq_send_state),
1953                           ptlrpc_import_state_name(imp->imp_state));
1954                 spin_lock(&req->rq_lock);
1955                 req->rq_status = -ETIMEDOUT;
1956                 req->rq_err = 1;
1957                 spin_unlock(&req->rq_lock);
1958                 RETURN(1);
1959         }
1960
1961         /* if a request can't be resent we can't wait for an answer after
1962            the timeout */
1963         if (ptlrpc_no_resend(req)) {
1964                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
1965                 rc = 1;
1966         }
1967
1968         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
1969
1970         RETURN(rc);
1971 }
1972
1973 /**
1974  * Time out all uncompleted requests in the request set pointed to by \a data.
1975  * Callback used when waiting on sets with l_wait_event.
1976  * Always returns 1.
1977  */
1978 int ptlrpc_expired_set(void *data)
1979 {
1980         struct ptlrpc_request_set *set = data;
1981         cfs_list_t                *tmp;
1982         time_t                     now = cfs_time_current_sec();
1983         ENTRY;
1984
1985         LASSERT(set != NULL);
1986
1987         /*
1988          * A timeout expired. See which reqs it applies to...
1989          */
1990         cfs_list_for_each (tmp, &set->set_requests) {
1991                 struct ptlrpc_request *req =
1992                         cfs_list_entry(tmp, struct ptlrpc_request,
1993                                        rq_set_chain);
1994
1995                 /* don't expire request waiting for context */
1996                 if (req->rq_wait_ctx)
1997                         continue;
1998
1999                 /* Request in-flight? */
2000                 if (!((req->rq_phase == RQ_PHASE_RPC &&
2001                        !req->rq_waiting && !req->rq_resend) ||
2002                       (req->rq_phase == RQ_PHASE_BULK)))
2003                         continue;
2004
2005                 if (req->rq_timedout ||     /* already dealt with */
2006                     req->rq_deadline > now) /* not expired */
2007                         continue;
2008
2009                 /* Deal with this request. Do it asynchronously so as
2010                  * not to block the ptlrpcd thread. */
2011                 ptlrpc_expire_one_request(req, 1);
2012         }
2013
2014         /*
2015          * When waiting for a whole set, we always break out of the
2016          * sleep so we can recalculate the timeout, or enable interrupts
2017          * if everyone's timed out.
2018          */
2019         RETURN(1);
2020 }
2021 EXPORT_SYMBOL(ptlrpc_expired_set);
2022
2023 /**
2024  * Sets rq_intr flag in \a req under spinlock.
2025  */
2026 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
2027 {
2028         spin_lock(&req->rq_lock);
2029         req->rq_intr = 1;
2030         spin_unlock(&req->rq_lock);
2031 }
2032 EXPORT_SYMBOL(ptlrpc_mark_interrupted);
2033
2034 /**
2035  * Interrupts (sets interrupted flag) all uncompleted requests in
2036  * a set \a data. Callback for l_wait_event for interruptible waits.
2037  */
2038 void ptlrpc_interrupted_set(void *data)
2039 {
2040         struct ptlrpc_request_set *set = data;
2041         cfs_list_t *tmp;
2042
2043         LASSERT(set != NULL);
2044         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2045
2046         cfs_list_for_each(tmp, &set->set_requests) {
2047                 struct ptlrpc_request *req =
2048                         cfs_list_entry(tmp, struct ptlrpc_request,
2049                                        rq_set_chain);
2050
2051                 if (req->rq_phase != RQ_PHASE_RPC &&
2052                     req->rq_phase != RQ_PHASE_UNREGISTERING)
2053                         continue;
2054
2055                 ptlrpc_mark_interrupted(req);
2056         }
2057 }
2058 EXPORT_SYMBOL(ptlrpc_interrupted_set);
2059
2060 /**
2061  * Get the smallest timeout in the set; this does NOT set a timeout.
2062  */
2063 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2064 {
2065         cfs_list_t            *tmp;
2066         time_t                 now = cfs_time_current_sec();
2067         int                    timeout = 0;
2068         struct ptlrpc_request *req;
2069         int                    deadline;
2070         ENTRY;
2071
2072         cfs_list_for_each(tmp, &set->set_requests) {
2073                 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2074
2075                 /*
2076                  * Request in-flight?
2077                  */
2078                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2079                       (req->rq_phase == RQ_PHASE_BULK) ||
2080                       (req->rq_phase == RQ_PHASE_NEW)))
2081                         continue;
2082
2083                 /*
2084                  * Already timed out.
2085                  */
2086                 if (req->rq_timedout)
2087                         continue;
2088
2089                 /*
2090                  * Waiting for ctx.
2091                  */
2092                 if (req->rq_wait_ctx)
2093                         continue;
2094
2095                 if (req->rq_phase == RQ_PHASE_NEW)
2096                         deadline = req->rq_sent;
2097                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2098                         deadline = req->rq_sent;
2099                 else
2100                         deadline = req->rq_sent + req->rq_timeout;
2101
2102                 if (deadline <= now)    /* actually expired already */
2103                         timeout = 1;    /* ASAP */
2104                 else if (timeout == 0 || timeout > deadline - now)
2105                         timeout = deadline - now;
2106         }
2107         RETURN(timeout);
2108 }
2109 EXPORT_SYMBOL(ptlrpc_set_next_timeout);
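
/*
 * A small worked example of the deadline arithmetic above (values are
 * illustrative only): with now == 1000, a request sent at rq_sent == 990
 * with rq_timeout == 30 has deadline 1020 and contributes a 20 second
 * timeout; a request already past its deadline forces timeout = 1
 * ("ASAP"); and a delayed RQ_PHASE_NEW request uses rq_sent itself as
 * the deadline, since that is the earliest moment it may be (re)sent.
 */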
2110
2111 /**
2112  * Send all unsent requests from the set and then wait until all
2113  * requests in the set complete (either get a reply, time out, get an
2114  * error, or otherwise be interrupted).
2115  * Returns 0 on success or an error code otherwise.
2116  */
2117 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
2118 {
2119         cfs_list_t            *tmp;
2120         struct ptlrpc_request *req;
2121         struct l_wait_info     lwi;
2122         int                    rc, timeout;
2123         ENTRY;
2124
2125         if (set->set_producer)
2126                 (void)ptlrpc_set_producer(set);
2127         else
2128                 cfs_list_for_each(tmp, &set->set_requests) {
2129                         req = cfs_list_entry(tmp, struct ptlrpc_request,
2130                                              rq_set_chain);
2131                         if (req->rq_phase == RQ_PHASE_NEW)
2132                                 (void)ptlrpc_send_new_req(req);
2133                 }
2134
2135         if (cfs_list_empty(&set->set_requests))
2136                 RETURN(0);
2137
2138         do {
2139                 timeout = ptlrpc_set_next_timeout(set);
2140
2141                 /* wait until all complete, interrupted, or an in-flight
2142                  * req times out */
2143                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
2144                        set, timeout);
2145
2146                 if (timeout == 0 && !cfs_signal_pending())
2147                         /*
2148                          * No requests are in-flight (either timed out
2149                          * or delayed), so we can allow interrupts.
2150                          * We still want to block for a limited time,
2151                          * so we allow interrupts during the timeout.
2152                          */
2153                         lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), 
2154                                                    ptlrpc_expired_set,
2155                                                    ptlrpc_interrupted_set, set);
2156                 else
2157                         /*
2158                          * At least one request is in flight, so no
2159                          * interrupts are allowed. Wait until all
2160                          * complete, or an in-flight req times out. 
2161                          */
2162                         lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
2163                                           ptlrpc_expired_set, set);
2164
2165                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
2166
2167                 /* LU-769 - if we ignored the signal because it was already
2168                  * pending when we started, we need to handle it now or we risk
2169                  * it being ignored forever */
2170                 if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
2171                     cfs_signal_pending()) {
2172                         sigset_t blocked_sigs =
2173                                            cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
2174
2175                         /* In fact we only interrupt for the "fatal" signals
2176                          * like SIGINT or SIGKILL. We still ignore less
2177                          * important signals since ptlrpc set is not easily
2178                          * reentrant from userspace again */
2179                         if (cfs_signal_pending())
2180                                 ptlrpc_interrupted_set(set);
2181                         cfs_restore_sigs(blocked_sigs);
2182                 }
2183
2184                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2185
2186                 /* -EINTR => all requests have been flagged rq_intr so next
2187                  * check completes.
2188                  * -ETIMEDOUT => someone timed out.  When all reqs have
2189                  * timed out, signals are enabled allowing completion with
2190                  * EINTR.
2191                  * I don't really care if we go once more round the loop in
2192                  * the error cases -eeb. */
2193                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2194                         cfs_list_for_each(tmp, &set->set_requests) {
2195                                 req = cfs_list_entry(tmp, struct ptlrpc_request,
2196                                                      rq_set_chain);
2197                                 spin_lock(&req->rq_lock);
2198                                 req->rq_invalid_rqset = 1;
2199                                 spin_unlock(&req->rq_lock);
2200                         }
2201                 }
2202         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2203
2204         LASSERT(atomic_read(&set->set_remaining) == 0);
2205
2206         rc = set->set_rc; /* rq_status of already freed requests if any */
2207         cfs_list_for_each(tmp, &set->set_requests) {
2208                 req = cfs_list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2209
2210                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2211                 if (req->rq_status != 0)
2212                         rc = req->rq_status;
2213         }
2214
2215         if (set->set_interpret != NULL) {
2216                 int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
2217                         set->set_interpret;
2218                 rc = interpreter(set, set->set_arg, rc);
2219         } else {
2220                 struct ptlrpc_set_cbdata *cbdata, *n;
2221                 int err;
2222
2223                 cfs_list_for_each_entry_safe(cbdata, n,
2224                                          &set->set_cblist, psc_item) {
2225                         cfs_list_del_init(&cbdata->psc_item);
2226                         err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
2227                         if (err && !rc)
2228                                 rc = err;
2229                         OBD_FREE_PTR(cbdata);
2230                 }
2231         }
2232
2233         RETURN(rc);
2234 }
2235 EXPORT_SYMBOL(ptlrpc_set_wait);
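
/*
 * For reference, the canonical life cycle of a request set as driven by
 * the helpers above; a minimal sketch in which my_pack_request() is a
 * hypothetical request constructor and error handling is trimmed:
 *
 *   struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *   struct ptlrpc_request     *req = my_pack_request(imp);
 *   int                        rc;
 *
 *   ptlrpc_set_add_req(set, req);   // the set consumes this reference
 *   rc = ptlrpc_set_wait(set);      // sends everything, blocks to completion
 *   ptlrpc_set_destroy(set);        // releases the set's request references
 *
 * Callers that need the request afterwards take an extra reference
 * before adding it, as ptlrpc_queue_wait() below does.
 */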
2236
2237 /**
2238  * Helper function for request freeing.
2239  * Called when the request refcount reaches zero and the request needs to be
2240  * freed. Removes the request from all sorts of sending/replay lists it might
2241  * be on, and frees network buffers if any are present.
2242  * If \a locked is set, the caller is already holding the import imp_lock
2243  * and so we no longer need to reobtain it (for certain list manipulations).
2244  */
2245 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2246 {
2247         ENTRY;
2248         if (request == NULL) {
2249                 EXIT;
2250                 return;
2251         }
2252
2253         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2254         LASSERTF(request->rq_rqbd == NULL, "req %p\n", request); /* client-side */
2255         LASSERTF(cfs_list_empty(&request->rq_list), "req %p\n", request);
2256         LASSERTF(cfs_list_empty(&request->rq_set_chain), "req %p\n", request);
2257         LASSERTF(cfs_list_empty(&request->rq_exp_list), "req %p\n", request);
2258         LASSERTF(!request->rq_replay, "req %p\n", request);
2259
2260         req_capsule_fini(&request->rq_pill);
2261
2262         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
2263          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
2264         if (request->rq_import != NULL) {
2265                 if (!locked)
2266                         spin_lock(&request->rq_import->imp_lock);
2267                 cfs_list_del_init(&request->rq_replay_list);
2268                 if (!locked)
2269                         spin_unlock(&request->rq_import->imp_lock);
2270         }
2271         LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
2272
2273         if (atomic_read(&request->rq_refcount) != 0) {
2274                 DEBUG_REQ(D_ERROR, request,
2275                           "freeing request with nonzero refcount");
2276                 LBUG();
2277         }
2278
2279         if (request->rq_repbuf != NULL)
2280                 sptlrpc_cli_free_repbuf(request);
2281         if (request->rq_export != NULL) {
2282                 class_export_put(request->rq_export);
2283                 request->rq_export = NULL;
2284         }
2285         if (request->rq_import != NULL) {
2286                 class_import_put(request->rq_import);
2287                 request->rq_import = NULL;
2288         }
2289         if (request->rq_bulk != NULL)
2290                 ptlrpc_free_bulk_pin(request->rq_bulk);
2291
2292         if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
2293                 sptlrpc_cli_free_reqbuf(request);
2294
2295         if (request->rq_cli_ctx)
2296                 sptlrpc_req_put_ctx(request, !locked);
2297
2298         if (request->rq_pool)
2299                 __ptlrpc_free_req_to_pool(request);
2300         else
2301                 ptlrpc_request_cache_free(request);
2302         EXIT;
2303 }
2304
2305 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2306 /**
2307  * Drop one request reference. Must be called with import imp_lock held.
2308  * When the reference count drops to zero, the request is freed.
2309  */
2310 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2311 {
2312         assert_spin_locked(&request->rq_import->imp_lock);
2313         (void)__ptlrpc_req_finished(request, 1);
2314 }
2315 EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
2316
2317 /**
2318  * Helper function.
2319  * Drops one reference count for request \a request.
2320  * \a locked set indicates that the caller holds the import imp_lock.
2321  * Frees the request when the reference count reaches zero.
2322  */
2323 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2324 {
2325         ENTRY;
2326         if (request == NULL)
2327                 RETURN(1);
2328
2329         if (request == LP_POISON ||
2330             request->rq_reqmsg == LP_POISON) {
2331                 CERROR("dereferencing freed request (bug 575)\n");
2332                 LBUG();
2333                 RETURN(1);
2334         }
2335
2336         DEBUG_REQ(D_INFO, request, "refcount now %u",
2337                   atomic_read(&request->rq_refcount) - 1);
2338
2339         if (atomic_dec_and_test(&request->rq_refcount)) {
2340                 __ptlrpc_free_req(request, locked);
2341                 RETURN(1);
2342         }
2343
2344         RETURN(0);
2345 }
2346
2347 /**
2348  * Drops one reference count for a request.
2349  */
2350 void ptlrpc_req_finished(struct ptlrpc_request *request)
2351 {
2352         __ptlrpc_req_finished(request, 0);
2353 }
2354 EXPORT_SYMBOL(ptlrpc_req_finished);
2355
2356 /**
2357  * Returns the xid of \a request.
2358  */
2359 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2360 {
2361         return request->rq_xid;
2362 }
2363 EXPORT_SYMBOL(ptlrpc_req_xid);
2364
2365 /**
2366  * Disengage the client's reply buffer from the network.
2367  * NB does _NOT_ unregister any client-side bulk.
2368  * IDEMPOTENT, but _not_ safe against concurrent callers.
2369  * The request owner (i.e. the thread doing the I/O) must call...
2370  * Returns 1 once the reply is unlinked, 0 if the unlink is still pending.
2371  */
2372 int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2373 {
2374         int                rc;
2375         struct l_wait_info lwi;
2376
2377         /*
2378          * Might sleep.
2379          */
2380         LASSERT(!in_interrupt());
2381
2382         /*
2383          * Let's setup deadline for reply unlink.
2384          */
2385         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2386             async && request->rq_reply_deadline == 0)
2387                 request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
2388
2389         /*
2390          * Nothing left to do.
2391          */
2392         if (!ptlrpc_client_recv_or_unlink(request))
2393                 RETURN(1);
2394
2395         LNetMDUnlink(request->rq_reply_md_h);
2396
2397         /*
2398          * Let's check it once again.
2399          */
2400         if (!ptlrpc_client_recv_or_unlink(request))
2401                 RETURN(1);
2402
2403         /*
2404          * Move to "Unregistering" phase as reply was not unlinked yet.
2405          */
2406         ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
2407
2408         /*
2409          * Do not wait for unlink to finish.
2410          */
2411         if (async)
2412                 RETURN(0);
2413
2414         /*
2415          * We have to l_wait_event() whatever the result, to give liblustre
2416          * a chance to run reply_in_callback(), and to make sure we've
2417          * unlinked before returning a req to the pool.
2418          */
2419         for (;;) {
2420 #ifdef __KERNEL__
2421                 /* The wq argument is ignored by user-space wait_event macros */
2422                 wait_queue_head_t *wq = (request->rq_set != NULL) ?
2423                                         &request->rq_set->set_waitq :
2424                                         &request->rq_reply_waitq;
2425 #endif
2426                 /* Network access will complete in finite time but the HUGE
2427                  * timeout lets us CWARN for visibility of sluggish NALs */
2428                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2429                                            cfs_time_seconds(1), NULL, NULL);
2430                 rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
2431                                   &lwi);
2432                 if (rc == 0) {
2433                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2434                         RETURN(1);
2435                 }
2436
2437                 LASSERT(rc == -ETIMEDOUT);
2438                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
2439                           "rvcng=%d unlnk=%d/%d", request->rq_receiving_reply,
2440                           request->rq_req_unlink, request->rq_reply_unlink);
2441         }
2442         RETURN(0);
2443 }
2444 EXPORT_SYMBOL(ptlrpc_unregister_reply);
2445
2446 static void ptlrpc_free_request(struct ptlrpc_request *req)
2447 {
2448         spin_lock(&req->rq_lock);
2449         req->rq_replay = 0;
2450         spin_unlock(&req->rq_lock);
2451
2452         if (req->rq_commit_cb != NULL)
2453                 req->rq_commit_cb(req);
2454         cfs_list_del_init(&req->rq_replay_list);
2455
2456         __ptlrpc_req_finished(req, 1);
2457 }
2458
2459 /**
2460  * The request is committed and dropped from the replay list of its import.
2461  */
2462 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2463 {
2464         struct obd_import       *imp = req->rq_import;
2465
2466         spin_lock(&imp->imp_lock);
2467         if (cfs_list_empty(&req->rq_replay_list)) {
2468                 spin_unlock(&imp->imp_lock);
2469                 return;
2470         }
2471
2472         if (force || req->rq_transno <= imp->imp_peer_committed_transno)
2473                 ptlrpc_free_request(req);
2474
2475         spin_unlock(&imp->imp_lock);
2476 }
2477 EXPORT_SYMBOL(ptlrpc_request_committed);
2478
2479 /**
2480  * Iterates through the replay_list on the import and prunes
2481  * all requests that have a transno smaller than last_committed for the
2482  * import and don't have rq_replay set.
2483  * Since requests are sorted in transno order, stops when meeting the first
2484  * transno bigger than last_committed.
2485  * Caller must hold imp->imp_lock.
2486  */
2487 void ptlrpc_free_committed(struct obd_import *imp)
2488 {
2489         struct ptlrpc_request   *req, *saved;
2490         struct ptlrpc_request   *last_req = NULL; /* temporary fire escape */
2491         bool                     skip_committed_list = true;
2492         ENTRY;
2493
2494         LASSERT(imp != NULL);
2495         assert_spin_locked(&imp->imp_lock);
2496
2497         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2498             imp->imp_generation == imp->imp_last_generation_checked) {
2499                 CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
2500                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2501                 RETURN_EXIT;
2502         }
2503         CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
2504                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2505                imp->imp_generation);
2506
2507         if (imp->imp_generation != imp->imp_last_generation_checked)
2508                 skip_committed_list = false;
2509
2510         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2511         imp->imp_last_generation_checked = imp->imp_generation;
2512
2513         cfs_list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2514                                      rq_replay_list) {
2515                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2516                 LASSERT(req != last_req);
2517                 last_req = req;
2518
2519                 if (req->rq_transno == 0) {
2520                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2521                         LBUG();
2522                 }
2523                 if (req->rq_import_generation < imp->imp_generation) {
2524                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2525                         GOTO(free_req, 0);
2526                 }
2527
2528                 /* not yet committed */
2529                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2530                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2531                         break;
2532                 }
2533
2534                 if (req->rq_replay) {
2535                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2536                         cfs_list_move_tail(&req->rq_replay_list,
2537                                            &imp->imp_committed_list);
2538                         continue;
2539                 }
2540
2541                 DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
2542                           imp->imp_peer_committed_transno);
2543 free_req:
2544                 ptlrpc_free_request(req);
2545         }
2546
2547         if (skip_committed_list)
2548                 GOTO(out, 0);
2549
2550         cfs_list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2551                                      rq_replay_list) {
2552                 LASSERT(req->rq_transno != 0);
2553                 if (req->rq_import_generation < imp->imp_generation) {
2554                         DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
2555                         ptlrpc_free_request(req);
2556                 }
2557         }
2558 out:
2559         EXIT;
2560 }
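
/*
 * An illustrative pass of the pruning above (transnos invented for the
 * example): with imp_peer_committed_transno == 7 and imp_replay_list
 * holding transnos [5, 6, 7, 9], requests 5 and 6 are freed; 7 is freed
 * unless it has rq_replay set, in which case it moves to
 * imp_committed_list; and the walk stops at 9, which is not yet
 * committed and must be kept for possible replay.
 */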
2561
2562 void ptlrpc_cleanup_client(struct obd_import *imp)
2563 {
2564         ENTRY;
2565         EXIT;
2566 }
2567 EXPORT_SYMBOL(ptlrpc_cleanup_client);
2568
2569 /**
2570  * Schedule previously sent request for resend.
2571  * For bulk requests we assign a new xid (to avoid problems with
2572  * lost replies and therefore several transfers landing into the same
2573  * buffer from different sending attempts).
2574  */
2575 void ptlrpc_resend_req(struct ptlrpc_request *req)
2576 {
2577         DEBUG_REQ(D_HA, req, "going to resend");
2578         spin_lock(&req->rq_lock);
2579
2580         /* The request got a reply but is still linked to the import list.
2581            Let ptlrpc_check_set() process it. */
2582         if (ptlrpc_client_replied(req)) {
2583                 spin_unlock(&req->rq_lock);
2584                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2585                 return;
2586         }
2587
2588         lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
2589         req->rq_status = -EAGAIN;
2590
2591         req->rq_resend = 1;
2592         req->rq_net_err = 0;
2593         req->rq_timedout = 0;
2594         if (req->rq_bulk) {
2595                 __u64 old_xid = req->rq_xid;
2596
2597                 /* ensure previous bulk fails */
2598                 req->rq_xid = ptlrpc_next_xid();
2599                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
2600                        old_xid, req->rq_xid);
2601         }
2602         ptlrpc_client_wake_req(req);
2603         spin_unlock(&req->rq_lock);
2604 }
2605 EXPORT_SYMBOL(ptlrpc_resend_req);
2606
2607 /* XXX: this function and rq_status are currently unused */
2608 void ptlrpc_restart_req(struct ptlrpc_request *req)
2609 {
2610         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2611         req->rq_status = -ERESTARTSYS;
2612
2613         spin_lock(&req->rq_lock);
2614         req->rq_restart = 1;
2615         req->rq_timedout = 0;
2616         ptlrpc_client_wake_req(req);
2617         spin_unlock(&req->rq_lock);
2618 }
2619 EXPORT_SYMBOL(ptlrpc_restart_req);
2620
2621 /**
2622  * Grab additional reference on a request \a req
2623  */
2624 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
2625 {
2626         ENTRY;
2627         atomic_inc(&req->rq_refcount);
2628         RETURN(req);
2629 }
2630 EXPORT_SYMBOL(ptlrpc_request_addref);
2631
2632 /**
2633  * Add a request to the import's replay_list.
2634  * Must be called under imp_lock.
2635  */
2636 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2637                                       struct obd_import *imp)
2638 {
2639         cfs_list_t *tmp;
2640
2641         assert_spin_locked(&imp->imp_lock);
2642
2643         if (req->rq_transno == 0) {
2644                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
2645                 LBUG();
2646         }
2647
2648         /* clear this for new requests that were resent as well
2649            as resent replayed requests. */
2650         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2651
2652         /* don't re-add requests that have been replayed */
2653         if (!cfs_list_empty(&req->rq_replay_list))
2654                 return;
2655
2656         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
2657
2658         LASSERT(imp->imp_replayable);
2659         /* Balanced in ptlrpc_free_committed, usually. */
2660         ptlrpc_request_addref(req);
2661         cfs_list_for_each_prev(tmp, &imp->imp_replay_list) {
2662                 struct ptlrpc_request *iter =
2663                         cfs_list_entry(tmp, struct ptlrpc_request,
2664                                        rq_replay_list);
2665
2666                 /* We may have duplicate transnos if we create and then
2667                  * open a file, or for closes retained if to match creating
2668                  * opens, so use req->rq_xid as a secondary key.
2669                  * (See bugs 684, 685, and 428.)
2670                  * XXX no longer needed, but all opens need transnos!
2671                  */
2672                 if (iter->rq_transno > req->rq_transno)
2673                         continue;
2674
2675                 if (iter->rq_transno == req->rq_transno) {
2676                         LASSERT(iter->rq_xid != req->rq_xid);
2677                         if (iter->rq_xid > req->rq_xid)
2678                                 continue;
2679                 }
2680
2681                 cfs_list_add(&req->rq_replay_list, &iter->rq_replay_list);
2682                 return;
2683         }
2684
2685         cfs_list_add(&req->rq_replay_list, &imp->imp_replay_list);
2686 }
2687 EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
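
/*
 * The insertion above keeps imp_replay_list sorted by (transno, xid).
 * For example (invented values): if the list holds the transno/xid pairs
 * (5,a) (7,b) (7,d), a new request (7,c) is walked from the tail past
 * (7,d) until (7,b), the first entry not greater than it, is found, and
 * is linked right after it, yielding (5,a) (7,b) (7,c) (7,d).
 */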
2688
2689 /**
2690  * Send request and wait until it completes.
2691  * Returns request processing status.
2692  */
2693 int ptlrpc_queue_wait(struct ptlrpc_request *req)
2694 {
2695         struct ptlrpc_request_set *set;
2696         int rc;
2697         ENTRY;
2698
2699         LASSERT(req->rq_set == NULL);
2700         LASSERT(!req->rq_receiving_reply);
2701
2702         set = ptlrpc_prep_set();
2703         if (set == NULL) {
2704                 CERROR("Unable to allocate ptlrpc set.\n");
2705                 RETURN(-ENOMEM);
2706         }
2707
2708         /* for distributed debugging */
2709         lustre_msg_set_status(req->rq_reqmsg, current_pid());
2710
2711         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
2712         ptlrpc_request_addref(req);
2713         ptlrpc_set_add_req(set, req);
2714         rc = ptlrpc_set_wait(set);
2715         ptlrpc_set_destroy(set);
2716
2717         RETURN(rc);
2718 }
2719 EXPORT_SYMBOL(ptlrpc_queue_wait);
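
/*
 * Typical synchronous use of ptlrpc_queue_wait(); a sketch under assumed
 * names, where my_pack_request() stands in for the usual request
 * allocation and packing sequence:
 *
 *   struct ptlrpc_request *req = my_pack_request(imp);
 *   int rc;
 *
 *   if (req == NULL)
 *           return -ENOMEM;
 *   rc = ptlrpc_queue_wait(req);    // send and block until completion
 *   ptlrpc_req_finished(req);       // drop the caller's reference
 *   return rc;
 */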
2720
2721 struct ptlrpc_replay_async_args {
2722         int praa_old_state;
2723         int praa_old_status;
2724 };
2725
2726 /**
2727  * Callback used for processing replies to replayed requests.
2728  * In case of a successful reply, calls the registered request replay callback.
2729  * In case of error, restarts the replay process.
2730  */
2731 static int ptlrpc_replay_interpret(const struct lu_env *env,
2732                                    struct ptlrpc_request *req,
2733                                    void * data, int rc)
2734 {
2735         struct ptlrpc_replay_async_args *aa = data;
2736         struct obd_import *imp = req->rq_import;
2737
2738         ENTRY;
2739         atomic_dec(&imp->imp_replay_inflight);
2740
2741         if (!ptlrpc_client_replied(req)) {
2742                 CERROR("request replay timed out, restarting recovery\n");
2743                 GOTO(out, rc = -ETIMEDOUT);
2744         }
2745
2746         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
2747             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
2748              lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
2749                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
2750
2751         /** VBR: check version failure */
2752         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
2753                 /** replay failed due to a version mismatch */
2754                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay");
2755                 spin_lock(&imp->imp_lock);
2756                 imp->imp_vbr_failed = 1;
2757                 imp->imp_no_lock_replay = 1;
2758                 spin_unlock(&imp->imp_lock);
2759                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2760         } else {
2761                 /** The transno had better not change over replay. */
2762                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
2763                          lustre_msg_get_transno(req->rq_repmsg) ||
2764                          lustre_msg_get_transno(req->rq_repmsg) == 0,
2765                          LPX64"/"LPX64"\n",
2766                          lustre_msg_get_transno(req->rq_reqmsg),
2767                          lustre_msg_get_transno(req->rq_repmsg));
2768         }
2769
2770         spin_lock(&imp->imp_lock);
2771         /** if replaying by version, a gap occurred on the server; don't trust locks */
2772         if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
2773                 imp->imp_no_lock_replay = 1;
2774         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
2775         spin_unlock(&imp->imp_lock);
2776         LASSERT(imp->imp_last_replay_transno);
2777
2778         /* transaction number shouldn't be bigger than the latest replayed */
2779         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
2780                 DEBUG_REQ(D_ERROR, req,
2781                           "Reported transno "LPU64" is bigger than the "
2782                           "replayed one: "LPU64, req->rq_transno,
2783                           lustre_msg_get_transno(req->rq_reqmsg));
2784                 GOTO(out, rc = -EINVAL);
2785         }
2786
2787         DEBUG_REQ(D_HA, req, "got rep");
2788
2789         /* let the callback do fixups, possibly including changes to the request */
2790         if (req->rq_replay_cb)
2791                 req->rq_replay_cb(req);
2792
2793         if (ptlrpc_client_replied(req) &&
2794             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
2795                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
2796                           lustre_msg_get_status(req->rq_repmsg),
2797                           aa->praa_old_status);
2798         } else {
2799                 /* Put it back for re-replay. */
2800                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2801         }
2802
2803         /*
2804          * Errors during replay can set transno to 0, but
2805          * imp_last_replay_transno should not be set to 0 in any case
2806          */
2807         if (req->rq_transno == 0)
2808                 CERROR("Transno is 0 during replay!\n");
2809
2810         /* continue with recovery */
2811         rc = ptlrpc_import_recovery_state_machine(imp);
2812  out:
2813         req->rq_send_state = aa->praa_old_state;
2814
2815         if (rc != 0)
2816                 /* this replay failed, so restart recovery */
2817                 ptlrpc_connect_import(imp);
2818
2819         RETURN(rc);
2820 }
2821
2822 /**
2823  * Prepares and queues request for replay.
2824  * Adds it to ptlrpcd queue for actual sending.
2825  * Returns 0 on success.
2826  */
2827 int ptlrpc_replay_req(struct ptlrpc_request *req)
2828 {
2829         struct ptlrpc_replay_async_args *aa;
2830         ENTRY;
2831
2832         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
2833
2834         LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2835         aa = ptlrpc_req_async_args(req);
2836         memset(aa, 0, sizeof(*aa));
2837
2838         /* Prepare request to be resent with ptlrpcd */
2839         aa->praa_old_state = req->rq_send_state;
2840         req->rq_send_state = LUSTRE_IMP_REPLAY;
2841         req->rq_phase = RQ_PHASE_NEW;
2842         req->rq_next_phase = RQ_PHASE_UNDEFINED;
2843         if (req->rq_repmsg)
2844                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
2845         req->rq_status = 0;
2846         req->rq_interpret_reply = ptlrpc_replay_interpret;
2847         /* Readjust the timeout for current conditions */
2848         ptlrpc_at_set_req_timeout(req);
2849
2850         /* Tell the server our net_latency, so it can calculate how long
2851          * it should wait for the next replay */
2852         lustre_msg_set_service_time(req->rq_reqmsg,
2853                                     ptlrpc_at_get_net_latency(req));
2854         DEBUG_REQ(D_HA, req, "REPLAY");
2855
2856         atomic_inc(&req->rq_import->imp_replay_inflight);
2857         ptlrpc_request_addref(req);     /* ptlrpcd needs a ref */
2858
2859         ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
2860         RETURN(0);
2861 }
2862 EXPORT_SYMBOL(ptlrpc_replay_req);
2863
2864 /**
2865  * Aborts all in-flight requests on import \a imp's sending and delayed lists
2866  */
2867 void ptlrpc_abort_inflight(struct obd_import *imp)
2868 {
2869         cfs_list_t *tmp, *n;
2870         ENTRY;
2871
2872         /* Make sure that no new requests get processed for this import.
2873          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
2874          * this flag and then putting requests on sending_list or delayed_list.
2875          */
2876         spin_lock(&imp->imp_lock);
2877
2878         /* XXX locking?  Maybe we should remove each request with the list
2879          * locked?  Also, how do we know if the requests on the list are
2880          * being freed at this time?
2881          */
2882         cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
2883                 struct ptlrpc_request *req =
2884                         cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
2885
2886                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
2887
2888                 spin_lock(&req->rq_lock);
2889                 if (req->rq_import_generation < imp->imp_generation) {
2890                         req->rq_err = 1;
2891                         req->rq_status = -EIO;
2892                         ptlrpc_client_wake_req(req);
2893                 }
2894                 spin_unlock(&req->rq_lock);
2895         }
2896
2897         cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
2898                 struct ptlrpc_request *req =
2899                         cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
2900
2901                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
2902
2903                 spin_lock(&req->rq_lock);
2904                 if (req->rq_import_generation < imp->imp_generation) {
2905                         req->rq_err = 1;
2906                         req->rq_status = -EIO;
2907                         ptlrpc_client_wake_req(req);
2908                 }
2909                 spin_unlock(&req->rq_lock);
2910         }
2911
2912         /* Last chance to free reqs left on the replay list, but we
2913          * will still leak reqs that haven't committed.  */
2914         if (imp->imp_replayable)
2915                 ptlrpc_free_committed(imp);
2916
2917         spin_unlock(&imp->imp_lock);
2918
2919         EXIT;
2920 }
2921 EXPORT_SYMBOL(ptlrpc_abort_inflight);
2922
2923 /**
2924  * Abort all uncompleted requests in request set \a set
2925  */
2926 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
2927 {
2928         cfs_list_t *tmp, *pos;
2929
2930         LASSERT(set != NULL);
2931
2932         cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
2933                 struct ptlrpc_request *req =
2934                         cfs_list_entry(pos, struct ptlrpc_request,
2935                                        rq_set_chain);
2936
2937                 spin_lock(&req->rq_lock);
2938                 if (req->rq_phase != RQ_PHASE_RPC) {
2939                         spin_unlock(&req->rq_lock);
2940                         continue;
2941                 }
2942
2943                 req->rq_err = 1;
2944                 req->rq_status = -EINTR;
2945                 ptlrpc_client_wake_req(req);
2946                 spin_unlock(&req->rq_lock);
2947         }
2948 }
2949
2950 static __u64 ptlrpc_last_xid;
2951 static spinlock_t ptlrpc_last_xid_lock;
2952
2953 /**
2954  * Initialize the XID for the node.  This is common among all requests on
2955  * this node, and only requires the property that it is monotonically
2956  * increasing.  It does not need to be sequential.  Since this is also used
2957  * as the RDMA match bits, it is important that a single client NOT have
2958  * the same match bits for two different in-flight requests, hence we do
2959  * NOT want to have an XID per target or similar.
2960  *
2961  * To avoid an unlikely collision between match bits after a client reboot
2962  * (which would deliver old data into the wrong RDMA buffer) initialize
2963  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
2964  * If the time is clearly incorrect, we instead use a 62-bit random number.
2965  * In the worst case the random number will overflow 1M RPCs per second in
2966  * 9133 years, or permutations thereof.
2967  */
2968 #define YEAR_2004 (1ULL << 30)
2969 void ptlrpc_init_xid(void)
2970 {
2971         time_t now = cfs_time_current_sec();
2972
2973         spin_lock_init(&ptlrpc_last_xid_lock);
2974         if (now < YEAR_2004) {
2975                 cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
2976                 ptlrpc_last_xid >>= 2;
2977                 ptlrpc_last_xid |= (1ULL << 61);
2978         } else {
2979                 ptlrpc_last_xid = (__u64)now << 20;
2980         }
2981
2982         /* Need to always be aligned to a power-of-two for multi-bulk BRW */
2983         CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
2984         ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
2985 }
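
/*
 * Worked example of the initialization above (numbers are illustrative):
 * shifting the epoch time left by 20 bits reserves 2^20 (~1M) XIDs per
 * second, e.g. now = 1400000000 (May 2014) gives
 *
 *      ptlrpc_last_xid = 1400000000ULL << 20 = 0x53724e0000000
 *
 * which is then rounded down to a PTLRPC_BULK_OPS_COUNT boundary by the
 * mask.  A client that reboots with a sane clock therefore restarts above
 * any XID it used before, provided it averaged under ~1M RPCs per second.
 */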
2986
2987 /**
2988  * Increase xid and returns resulting new value to the caller.
2989  *
2990  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
2991  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
2992  * itself uses the last bulk xid needed, so the server can determine the
2993  * number of bulk transfers from the RPC XID and a bitmask.  The starting
2994  * xid must align to a power-of-two value.
2995  *
2996  * This is assumed to be true due to the initial ptlrpc_last_xid
2997  * value also being initialized to a power-of-two value. LU-1431
2998  */
2999 __u64 ptlrpc_next_xid(void)
3000 {
3001         __u64 next;
3002
3003         spin_lock(&ptlrpc_last_xid_lock);
3004         next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3005         ptlrpc_last_xid = next;
3006         spin_unlock(&ptlrpc_last_xid_lock);
3007
3008         return next;
3009 }
3010 EXPORT_SYMBOL(ptlrpc_next_xid);
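
/*
 * A hedged sketch (illustrative only, not the actual server code) of how the
 * alignment above can be exploited: since each returned XID is a multiple of
 * PTLRPC_BULK_OPS_COUNT, the receiver can split an RPC XID into the start of
 * its bulk batch and the number of bulk transfers used.
 */
#if 0
static inline __u64 example_first_bulk_xid(__u64 rpc_xid)
{
        /* round down to the aligned start of the batch */
        return rpc_xid & PTLRPC_BULK_OPS_MASK;
}

static inline int example_bulk_count(__u64 rpc_xid)
{
        /* the RPC carries the last bulk xid needed, so the count is its
         * offset within the batch plus one */
        return (int)(rpc_xid & ~PTLRPC_BULK_OPS_MASK) + 1;
}
#endif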
3011
3012 /**
3013  * Get a glimpse at what the next xid value might be.
3014  * Returns the possible next xid.
3015  */
3016 __u64 ptlrpc_sample_next_xid(void)
3017 {
3018 #if BITS_PER_LONG == 32
3019         /* need to avoid possible word tearing on 32-bit systems */
3020         __u64 next;
3021
3022         spin_lock(&ptlrpc_last_xid_lock);
3023         next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3024         spin_unlock(&ptlrpc_last_xid_lock);
3025
3026         return next;
3027 #else
3028         /* No need to lock, since the returned value is racy anyway */
3029         return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3030 #endif
3031 }
3032 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3033
3034 /**
3035  * Functions for operating ptlrpc workers.
3036  *
3037  * A ptlrpc work is a function which runs inside the ptlrpc context.
3038  * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
3039  *
3040  * 1. After a work item is created, it can be reused many times, that is:
3041  *         handler = ptlrpcd_alloc_work();
3042  *         ptlrpcd_queue_work();
3043  *
3044  *    queue it again when necessary:
3045  *         ptlrpcd_queue_work();
3046  *         ptlrpcd_destroy_work();
3047  * 2. ptlrpcd_queue_work() can be called concurrently, but the work is only
3048  *    queued once at any given time; as its name implies, there may be a
3049  *    delay before a ptlrpcd thread runs it (see the sketch ending this file).
3050  */
3051 struct ptlrpc_work_async_args {
3052         int   (*cb)(const struct lu_env *, void *);
3053         void   *cbdata;
3054 };
3055
3056 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3057 {
3058         /* re-initialize the req */
3059         req->rq_timeout         = obd_timeout;
3060         req->rq_sent            = cfs_time_current_sec();
3061         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3062         req->rq_reply_deadline  = req->rq_deadline;
3063         req->rq_phase           = RQ_PHASE_INTERPRET;
3064         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3065         req->rq_xid             = ptlrpc_next_xid();
3066         req->rq_import_generation = req->rq_import->imp_generation;
3067
3068         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3069 }
3070
3071 static int work_interpreter(const struct lu_env *env,
3072                             struct ptlrpc_request *req, void *data, int rc)
3073 {
3074         struct ptlrpc_work_async_args *arg = data;
3075
3076         LASSERT(ptlrpcd_check_work(req));
3077         LASSERT(arg->cb != NULL);
3078
3079         rc = arg->cb(env, arg->cbdata);
3080
3081         list_del_init(&req->rq_set_chain);
3082         req->rq_set = NULL;
3083
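        /* If ptlrpcd_queue_work() raced with this interpreter, the decrement
         * below still leaves a queued reference; restore the count to 2
         * (allocation ref plus queued ref) and resubmit the request so the
         * newly queued work is not lost. */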
3084         if (atomic_dec_return(&req->rq_refcount) > 1) {
3085                 atomic_set(&req->rq_refcount, 2);
3086                 ptlrpcd_add_work_req(req);
3087         }
3088         return rc;
3089 }
3090
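/* The address of this dummy variable serves as a sentinel: work requests
 * store it in rq_pill.rc_fmt so ptlrpcd_check_work() can tell them apart
 * from regular RPCs, which carry a real request format there. */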
3091 static int worker_format;
3092
3093 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3094 {
3095         return req->rq_pill.rc_fmt == (void *)&worker_format;
3096 }
3097
3098 /**
3099  * Create a work for ptlrpc.
3100  */
3101 void *ptlrpcd_alloc_work(struct obd_import *imp,
3102                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3103 {
3104         struct ptlrpc_request         *req = NULL;
3105         struct ptlrpc_work_async_args *args;
3106         ENTRY;
3107
3108         might_sleep();
3109
3110         if (cb == NULL)
3111                 RETURN(ERR_PTR(-EINVAL));
3112
3113         /* Copied some code from the deprecated fakereq. */
3114         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3115         if (req == NULL) {
3116                 CERROR("ptlrpc: out of memory!\n");
3117                 RETURN(ERR_PTR(-ENOMEM));
3118         }
3119
3120         req->rq_send_state = LUSTRE_IMP_FULL;
3121         req->rq_type = PTL_RPC_MSG_REQUEST;
3122         req->rq_import = class_import_get(imp);
3123         req->rq_export = NULL;
3124         req->rq_interpret_reply = work_interpreter;
3125         /* don't want reply */
3126         req->rq_receiving_reply = 0;
3127         req->rq_req_unlink = req->rq_reply_unlink = 0;
3128         req->rq_no_delay = req->rq_no_resend = 1;
3129         req->rq_pill.rc_fmt = (void *)&worker_format;
3130
3131         spin_lock_init(&req->rq_lock);
3132         CFS_INIT_LIST_HEAD(&req->rq_list);
3133         CFS_INIT_LIST_HEAD(&req->rq_replay_list);
3134         CFS_INIT_LIST_HEAD(&req->rq_set_chain);
3135         CFS_INIT_LIST_HEAD(&req->rq_history_list);
3136         CFS_INIT_LIST_HEAD(&req->rq_exp_list);
3137         init_waitqueue_head(&req->rq_reply_waitq);
3138         init_waitqueue_head(&req->rq_set_waitq);
3139         atomic_set(&req->rq_refcount, 1);
3140
3141         CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
3142         args = ptlrpc_req_async_args(req);
3143         args->cb     = cb;
3144         args->cbdata = cbdata;
3145
3146         RETURN(req);
3147 }
3148 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3149
3150 void ptlrpcd_destroy_work(void *handler)
3151 {
3152         struct ptlrpc_request *req = handler;
3153
3154         if (req)
3155                 ptlrpc_req_finished(req);
3156 }
3157 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3158
3159 int ptlrpcd_queue_work(void *handler)
3160 {
3161         struct ptlrpc_request *req = handler;
3162
3163         /*
3164          * Check if the req is already being queued.
3165          *
3166          * Here comes a trick: ptlrpc lacks a reliable way of checking
3167          * whether a req is being processed, so its refcount is used for
3168          * that purpose instead. This is okay because the caller should
3169          * treat this req as opaque data. - Jinshan
3170          */
3171         LASSERT(atomic_read(&req->rq_refcount) > 0);
3172         if (atomic_inc_return(&req->rq_refcount) == 2)
3173                 ptlrpcd_add_work_req(req);
3174         return 0;
3175 }
3176 EXPORT_SYMBOL(ptlrpcd_queue_work);
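
/*
 * A hedged usage sketch for the work API above (illustrative only, guarded
 * out of the build; the callback and its payload are hypothetical).  The
 * callback must not sleep, per the comment introducing these functions.
 */
#if 0
static int example_work_cb(const struct lu_env *env, void *data)
{
        struct obd_import *imp = data;

        CDEBUG(D_INFO, "work fired for import %p\n", imp);
        return 0;
}

static void example_use_work(struct obd_import *imp)
{
        void *handler = ptlrpcd_alloc_work(imp, example_work_cb, imp);

        if (IS_ERR(handler))
                return;

        /* safe to call repeatedly; queued at most once at any time */
        ptlrpcd_queue_work(handler);

        /* drop the allocation reference once the work is no longer needed */
        ptlrpcd_destroy_work(handler);
}
#endif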