commit 8c2a833ff56268c348c3f1dc60bcff6f3bc6ba3e
fs/lustre-release.git: lustre/ptlrpc/client.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

/** Implementation of client-side PortalRPC interfaces */

#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <lustre_import.h>
#include <lustre_req_layout.h>

#include "ptlrpc_internal.h"

static int ptlrpc_send_new_req(struct ptlrpc_request *req);
static int ptlrpcd_check_work(struct ptlrpc_request *req);

/**
 * Initialize passed in client structure \a cl.
 */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}
EXPORT_SYMBOL(ptlrpc_init_client);
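
/**
 * Usage sketch (illustrative, not part of this file): a ptlrpc_client is
 * typically initialized once at obd setup time.  The portal constants below
 * are assumed to be the OSC/OST pair from lustre_net.h; substitute whatever
 * portals your client actually uses.
 *
 * \code
 * static struct ptlrpc_client example_client;
 *
 * ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
 *                    "example-osc", &example_client);
 * \endcode
 */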

/**
 * Return PortalRPC connection for remote uuid \a uuid.
 */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        lnet_nid_t                self;
        lnet_process_id_t         peer;
        int                       err;

        /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
         * before accessing its values. */
        /* coverity[uninit_use_in_call] */
        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CNETERR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_connection_get(peer, self, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}
EXPORT_SYMBOL(ptlrpc_uuid_to_connection);

/**
 * Allocate and initialize new bulk descriptor on the sender.
 * Returns pointer to the descriptor or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
                                         unsigned type, unsigned portal)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_portal = portal;
        desc->bd_type = type;
        desc->bd_md_count = 0;
        LASSERT(max_brw > 0);
        desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
        /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
         * node. Negotiated ocd_brw_size will always be <= this number. */
        for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
                LNetInvalidateHandle(&desc->bd_mds[i]);

        return desc;
}

/**
 * Prepare bulk descriptor for specified outgoing request \a req that
 * can fit \a npages pages. \a type is the bulk type. \a portal is where
 * the bulk is to be sent. Used on the client side.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              unsigned npages, unsigned max_brw,
                                              unsigned type, unsigned portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
        desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = client_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* This makes the request own the descriptor and free it when the
         * request itself is freed. */
        req->rq_bulk = desc;

        return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);

/**
 * Add a page \a page to the bulk descriptor \a desc.
 * Data to transfer in the page starts at offset \a pageoffset and
 * the amount of data to transfer from the page is \a len.
 */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                             struct page *page, int pageoffset, int len, int pin)
{
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);

        desc->bd_nob += len;

        if (pin)
                page_cache_get(page);

        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
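
/**
 * Usage sketch (illustrative, not part of this file): preparing a one-page
 * bulk read on an already-packed request.  BULK_PUT_SINK matches a
 * client-side read, OST_BULK_PORTAL is an assumed portal, and req/page are
 * assumed to exist in the caller; the final argument of 1 pins the page for
 * the duration of the transfer.
 *
 * \code
 * struct ptlrpc_bulk_desc *desc;
 *
 * desc = ptlrpc_prep_bulk_imp(req, 1, 1, BULK_PUT_SINK, OST_BULK_PORTAL);
 * if (desc == NULL)
 *         return -ENOMEM;
 * __ptlrpc_prep_bulk_page(desc, page, 0, PAGE_CACHE_SIZE, 1);
 * \endcode
 */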

/**
 * Uninitialize and free bulk descriptor \a desc.
 * Works on bulk descriptors both from server and client side.
 */
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
        int i;
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(desc->bd_md_count == 0);          /* network hands off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        sptlrpc_enc_pool_put_pages(desc);

        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        if (unpin) {
                for (i = 0; i < desc->bd_iov_count; i++)
                        page_cache_release(desc->bd_iov[i].kiov_page);
        }

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
                                bd_iov[desc->bd_max_iov]));
        EXIT;
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);

/**
 * Set server timelimit for this req, i.e. how long we are willing to wait
 * for a reply before timing out this request.
 */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
        __u32 serv_est;
        int idx;
        struct imp_at *at;

        LASSERT(req->rq_import);

        if (AT_OFF) {
                /* non-AT settings */
                /**
                 * \a imp_server_timeout means this is a reverse import and
                 * we send (currently only) ASTs to the client and cannot afford
                 * to wait too long for the reply, otherwise the other client
                 * (because of which we are sending this request) would
                 * time out waiting for us
                 */
                req->rq_timeout = req->rq_import->imp_server_timeout ?
                                  obd_timeout / 2 : obd_timeout;
        } else {
                at = &req->rq_import->imp_at;
                idx = import_at_get_index(req->rq_import,
                                          req->rq_request_portal);
                serv_est = at_get(&at->iat_service_estimate[idx]);
                req->rq_timeout = at_est2timeout(serv_est);
        }
        /* We could get even fancier here, using history to predict increased
           loading... */

        /* Let the server know what this RPC timeout is by putting it in the
           reqmsg */
        lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
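
/*
 * Worked example (assuming the at_est2timeout() rule quoted later in this
 * file: timeout = service estimate x 125% + 5s): with AT enabled and a
 * current service estimate of 20s for this portal, the request gets
 * rq_timeout = 20 * 1.25 + 5 = 30s.  With AT off it simply gets obd_timeout,
 * or obd_timeout / 2 on a reverse import.
 */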

/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
                                  unsigned int serv_est)
{
        int idx;
        unsigned int oldse;
        struct imp_at *at;

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        idx = import_at_get_index(req->rq_import, req->rq_request_portal);
        /* max service estimates are tracked on the server side,
           so just keep minimal history here */
        oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
        if (oldse != 0)
                CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       req->rq_request_portal,
                       oldse, at_get(&at->iat_service_estimate[idx]));
}

/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
        return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}

/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                                      unsigned int service_time)
{
        unsigned int nl, oldnl;
        struct imp_at *at;
        time_t now = cfs_time_current_sec();

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        /* Network latency is total time less server processing time,
         * plus one second for service time rounding */
        nl = max_t(int, now - req->rq_sent - service_time, 0) + 1;
        if (service_time > now - req->rq_sent + 3 /* bz16408 */)
                CWARN("Reported service time %u > total measured time "
                      CFS_DURATION_T"\n", service_time,
                      cfs_time_sub(now, req->rq_sent));

        oldnl = at_measured(&at->iat_net_latency, nl);
        if (oldnl != 0)
                CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       obd_uuid2str(
                               &req->rq_import->imp_connection->c_remote_uuid),
                       oldnl, at_get(&at->iat_net_latency));
}
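
/*
 * Worked example for the calculation above: if the request was sent 10s ago
 * (now - rq_sent == 10) and the server reports 7s of service time, then
 * nl = max(10 - 7, 0) + 1 = 4s is fed into the latency estimate.  A reported
 * service time above 13s would trigger the bz16408 warning, since it would
 * exceed the measured round-trip time plus the 3s slack.
 */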

static int unpack_reply(struct ptlrpc_request *req)
{
        int rc;

        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
                rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
                if (rc) {
                        DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
                        return(-EPROTO);
                }
        }

        rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
                return(-EPROTO);
        }
        return 0;
}

/**
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
__must_hold(&req->rq_lock)
{
        struct ptlrpc_request *early_req;
        time_t                 olddl;
        int                    rc;
        ENTRY;

        req->rq_early = 0;
        spin_unlock(&req->rq_lock);

        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        rc = unpack_reply(early_req);
        if (rc == 0) {
                /* Expecting to increase the service time estimate here */
                ptlrpc_at_adj_service(req,
                        lustre_msg_get_timeout(early_req->rq_repmsg));
                ptlrpc_at_adj_net_latency(req,
                        lustre_msg_get_service_time(early_req->rq_repmsg));
        }

        sptlrpc_cli_finish_early_reply(early_req);

        if (rc != 0) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        /* Adjust the local timeout for this req */
        ptlrpc_at_set_req_timeout(req);

        spin_lock(&req->rq_lock);
        olddl = req->rq_deadline;
        /* server assumes it now has rq_timeout from when the request
         * arrived, so the client should give it at least that long.
         * since we don't know the arrival time we'll use the original
         * sent time */
        req->rq_deadline = req->rq_sent + req->rq_timeout +
                           ptlrpc_at_get_net_latency(req);

        DEBUG_REQ(D_ADAPTTO, req,
                  "Early reply #%d, new deadline in "CFS_DURATION_T"s "
                  "("CFS_DURATION_T"s)", req->rq_early_count,
                  cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
                  cfs_time_sub(req->rq_deadline, olddl));

        RETURN(rc);
}

struct kmem_cache *request_cache;

int ptlrpc_request_cache_init(void)
{
        request_cache = kmem_cache_create("ptlrpc_cache",
                                          sizeof(struct ptlrpc_request),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
        return request_cache == NULL ? -ENOMEM : 0;
}

void ptlrpc_request_cache_fini(void)
{
        kmem_cache_destroy(request_cache);
}

struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
        struct ptlrpc_request *req;

        OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags);
        return req;
}

void ptlrpc_request_cache_free(struct ptlrpc_request *req)
{
        OBD_SLAB_FREE_PTR(req, request_cache);
}

/**
 * Wind down request pool \a pool.
 * Frees all requests from the pool too
 */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
        struct list_head *l, *tmp;
        struct ptlrpc_request *req;

        LASSERT(pool != NULL);

        spin_lock(&pool->prp_lock);
        list_for_each_safe(l, tmp, &pool->prp_req_list) {
                req = list_entry(l, struct ptlrpc_request, rq_list);
                list_del(&req->rq_list);
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
                OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
                ptlrpc_request_cache_free(req);
        }
        spin_unlock(&pool->prp_lock);
        OBD_FREE(pool, sizeof(*pool));
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);

/**
 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
 */
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
        int i;
        int size = 1;

        while (size < pool->prp_rq_size)
                size <<= 1;

        LASSERTF(list_empty(&pool->prp_req_list) ||
                 size == pool->prp_rq_size,
                 "Trying to change pool size with nonempty pool "
                 "from %d to %d bytes\n", pool->prp_rq_size, size);

        spin_lock(&pool->prp_lock);
        pool->prp_rq_size = size;
        for (i = 0; i < num_rq; i++) {
                struct ptlrpc_request *req;
                struct lustre_msg *msg;

                spin_unlock(&pool->prp_lock);
                req = ptlrpc_request_cache_alloc(GFP_NOFS);
                if (!req)
                        return;
                OBD_ALLOC_LARGE(msg, size);
                if (!msg) {
                        ptlrpc_request_cache_free(req);
                        return;
                }
                req->rq_reqbuf = msg;
                req->rq_reqbuf_len = size;
                req->rq_pool = pool;
                spin_lock(&pool->prp_lock);
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
        spin_unlock(&pool->prp_lock);
        return;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);

/**
 * Create and initialize new request pool with given attributes:
 * \a num_rq - initial number of requests to create for the pool
 * \a msgsize - maximum message size possible for requests in this pool
 * \a populate_pool - function to be called when more requests need to be added
 *                    to the pool
 * Returns pointer to newly created pool or NULL on error.
 */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
                    void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
        struct ptlrpc_request_pool *pool;

        OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
        if (!pool)
                return NULL;

        /* Request the next power of two for the allocation, because the
           kernel would internally round up to it anyway. */

        spin_lock_init(&pool->prp_lock);
        INIT_LIST_HEAD(&pool->prp_req_list);
        pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
        pool->prp_populate = populate_pool;

        populate_pool(pool, num_rq);

        if (list_empty(&pool->prp_req_list)) {
                /* have not allocated a single request for the pool */
                OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
                pool = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
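
/**
 * Usage sketch (illustrative, not part of this file): creating a small
 * emergency pool of four requests with 1KB message buffers, using
 * ptlrpc_add_rqs_to_pool() above as the populate callback, and tearing it
 * down again with ptlrpc_free_rq_pool().
 *
 * \code
 * struct ptlrpc_request_pool *pool;
 *
 * pool = ptlrpc_init_rq_pool(4, 1024, ptlrpc_add_rqs_to_pool);
 * if (pool == NULL)
 *         return -ENOMEM;
 * ...
 * ptlrpc_free_rq_pool(pool);
 * \endcode
 */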

/**
 * Fetches one request from pool \a pool
 */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        struct lustre_msg *reqbuf;

        if (!pool)
                return NULL;

        spin_lock(&pool->prp_lock);

        /* See if we have anything in a pool, and bail out if nothing.
         * In the writeout path, where this matters, this is safe to do
         * because nothing is lost in this case, and when some in-flight
         * requests complete, this code will be called again. */
        if (unlikely(list_empty(&pool->prp_req_list))) {
                spin_unlock(&pool->prp_lock);
                return NULL;
        }

        request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
                             rq_list);
        list_del_init(&request->rq_list);
        spin_unlock(&pool->prp_lock);

        LASSERT(request->rq_reqbuf);
        LASSERT(request->rq_pool);

        reqbuf = request->rq_reqbuf;
        memset(request, 0, sizeof(*request));
        request->rq_reqbuf = reqbuf;
        request->rq_reqbuf_len = pool->prp_rq_size;
        request->rq_pool = pool;

        return request;
}

/**
 * Returns freed \a request to pool.
 */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
        struct ptlrpc_request_pool *pool = request->rq_pool;

        spin_lock(&pool->prp_lock);
        LASSERT(list_empty(&request->rq_list));
        LASSERT(!request->rq_receiving_reply);
        list_add_tail(&request->rq_list, &pool->prp_req_list);
        spin_unlock(&pool->prp_lock);
}

static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                                      __u32 version, int opcode,
                                      int count, __u32 *lengths, char **bufs,
                                      struct ptlrpc_cli_ctx *ctx)
{
        struct obd_import  *imp = request->rq_import;
        int                 rc;
        ENTRY;

        if (unlikely(ctx))
                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
        else {
                rc = sptlrpc_req_get_ctx(request);
                if (rc)
                        GOTO(out_free, rc);
        }

        sptlrpc_req_set_flavor(request, opcode);

        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
                                 lengths, bufs);
        if (rc) {
                LASSERT(!request->rq_pool);
                GOTO(out_ctx, rc);
        }

        lustre_msg_add_version(request->rq_reqmsg, version);
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_export = NULL;

        request->rq_req_cbid.cbid_fn  = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;

        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
        request->rq_reply_cbid.cbid_arg = request;

        request->rq_reply_deadline = 0;
        request->rq_phase = RQ_PHASE_NEW;
        request->rq_next_phase = RQ_PHASE_UNDEFINED;

        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        ptlrpc_at_set_req_timeout(request);

        spin_lock_init(&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        INIT_LIST_HEAD(&request->rq_timed_list);
        INIT_LIST_HEAD(&request->rq_replay_list);
        INIT_LIST_HEAD(&request->rq_ctx_chain);
        INIT_LIST_HEAD(&request->rq_set_chain);
        INIT_LIST_HEAD(&request->rq_history_list);
        INIT_LIST_HEAD(&request->rq_exp_list);
        init_waitqueue_head(&request->rq_reply_waitq);
        init_waitqueue_head(&request->rq_set_waitq);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        lustre_msg_set_opc(request->rq_reqmsg, opcode);

        RETURN(0);
out_ctx:
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
        class_import_put(imp);
        return rc;
}

int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                             __u32 version, int opcode, char **bufs,
                             struct ptlrpc_cli_ctx *ctx)
{
        int count;

        count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
        return __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                          request->rq_pill.rc_area[RCL_CLIENT],
                                          bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);

/**
 * Pack request buffers for network transfer, performing any necessary
 * encryption steps.
 */
int ptlrpc_request_pack(struct ptlrpc_request *request,
                        __u32 version, int opcode)
{
        int rc;
        rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
        if (rc)
                return rc;

        /* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
         * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
         * have to send old ptlrpc_body to keep interoperability with these
         * clients.
         *
         * Only three kinds of server->client RPCs so far:
         *  - LDLM_BL_CALLBACK
         *  - LDLM_CP_CALLBACK
         *  - LDLM_GL_CALLBACK
         *
         * XXX This should be removed whenever we drop the interoperability
         *     with these old clients.
         */
        if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
            opcode == LDLM_GL_CALLBACK)
                req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
                                   sizeof(struct ptlrpc_body_v2), RCL_CLIENT);

        return rc;
}
EXPORT_SYMBOL(ptlrpc_request_pack);

/**
 * Helper function to allocate new request on import \a imp
 * and possibly using existing request from pool \a pool if provided.
 * Returns allocated request structure with import field filled or
 * NULL on error.
 */
static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
                                              struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request = NULL;

        if (pool)
                request = ptlrpc_prep_req_from_pool(pool);

        if (!request)
                request = ptlrpc_request_cache_alloc(GFP_NOFS);

        if (request) {
                LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
                LASSERT(imp != LP_POISON);
                LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
                        imp->imp_client);
                LASSERT(imp->imp_client != LP_POISON);

                request->rq_import = class_import_get(imp);
        } else {
                CERROR("request allocation out of memory\n");
        }

        return request;
}

/**
 * Helper function for creating a request.
 * Calls __ptlrpc_request_alloc to allocate a new request structure and inits
 * buffer structures according to capsule template \a format.
 * Returns allocated request structure pointer or NULL on error.
 */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
                              struct ptlrpc_request_pool *pool,
                              const struct req_format *format)
{
        struct ptlrpc_request *request;

        request = __ptlrpc_request_alloc(imp, pool);
        if (request == NULL)
                return NULL;

        req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
        req_capsule_set(&request->rq_pill, format);
        return request;
}

/**
 * Allocate new request structure for import \a imp and initialize its
 * buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, NULL, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc);

/**
 * Allocate new request structure for import \a imp from pool \a pool and
 * initialize its buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
                                            struct ptlrpc_request_pool *pool,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, pool, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pool);

/**
 * For requests not from pool, free memory of the request structure.
 * For requests obtained from a pool earlier, return request back to pool.
 */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
        if (request->rq_pool)
                __ptlrpc_free_req_to_pool(request);
        else
                ptlrpc_request_cache_free(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);

/**
 * Allocate new request for operation \a opcode and immediately pack it for
 * network transfer.
 * Only used for simple requests like OBD_PING where the only important
 * part of the request is the operation itself.
 * Returns allocated request or NULL on error.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
                                                const struct req_format *format,
                                                __u32 version, int opcode)
{
        struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
        int                    rc;

        if (req) {
                rc = ptlrpc_request_pack(req, version, opcode);
                if (rc) {
                        ptlrpc_request_free(req);
                        req = NULL;
                }
        }
        return req;
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
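
/**
 * Usage sketch (illustrative, not part of this file): allocating and packing
 * a ping in one step.  RQF_OBD_PING and LUSTRE_OBD_VERSION are assumed to be
 * the format and version the ping code uses for this request.
 *
 * \code
 * struct ptlrpc_request *req;
 *
 * req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                 LUSTRE_OBD_VERSION, OBD_PING);
 * if (req == NULL)
 *         return -ENOMEM;
 * \endcode
 */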

/**
 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
 * for operation \a opcode. Request would contain \a count buffers.
 * Sizes of buffers are described in array \a lengths and buffers themselves
 * are provided by a pointer \a bufs.
 * Returns prepared request structure pointer or NULL on error.
 */
struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
                     __u32 version, int opcode,
                     int count, __u32 *lengths, char **bufs,
                     struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        int                    rc;

        request = __ptlrpc_request_alloc(imp, pool);
        if (!request)
                return NULL;

        rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                        lengths, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(request);
                request = NULL;
        }
        return request;
}
EXPORT_SYMBOL(ptlrpc_prep_req_pool);

/**
 * Same as ptlrpc_prep_req_pool, but without pool
 */
struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
                __u32 *lengths, char **bufs)
{
        return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
                                    NULL);
}
EXPORT_SYMBOL(ptlrpc_prep_req);
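
/**
 * Usage sketch (illustrative, not part of this file) for the fixed-buffer
 * interface above.  It assumes the convention that buffer 0 holds the
 * ptlrpc_body and that one payload buffer follows; payload_size and opcode
 * are placeholders.
 *
 * \code
 * __u32 lengths[] = { sizeof(struct ptlrpc_body), payload_size };
 * struct ptlrpc_request *req;
 *
 * req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, opcode, 2, lengths, NULL);
 * \endcode
 */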

/**
 * Allocate and initialize new request set structure.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;

        ENTRY;
        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        atomic_set(&set->set_refcount, 1);
        INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        atomic_set(&set->set_new_count, 0);
        atomic_set(&set->set_remaining, 0);
        spin_lock_init(&set->set_new_req_lock);
        INIT_LIST_HEAD(&set->set_new_requests);
        INIT_LIST_HEAD(&set->set_cblist);
        set->set_max_inflight = UINT_MAX;
        set->set_producer     = NULL;
        set->set_producer_arg = NULL;
        set->set_rc           = 0;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_set);

/**
 * Allocate and initialize new request set structure with flow control
 * extension. This extension makes it possible to control the number of
 * requests in flight for the whole set. A callback function to generate
 * requests must be provided and the request set will keep the number of
 * requests sent over the wire to @max_inflight.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
                                             void *arg)
{
        struct ptlrpc_request_set *set;

        set = ptlrpc_prep_set();
        if (!set)
                RETURN(NULL);

        set->set_max_inflight  = max;
        set->set_producer      = func;
        set->set_producer_arg  = arg;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
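
/**
 * Usage sketch (illustrative, not part of this file): a minimal producer
 * callback.  ptlrpc_set_producer() below treats -ENOENT as "no more RPCs to
 * produce"; build_next_req() is a hypothetical helper that returns NULL when
 * the caller has run out of work.
 *
 * \code
 * static int example_producer(struct ptlrpc_request_set *set, void *arg)
 * {
 *         struct ptlrpc_request *req = build_next_req(arg);
 *
 *         if (req == NULL)
 *                 return -ENOENT;
 *         ptlrpc_set_add_req(set, req);
 *         return 0;
 * }
 *
 * set = ptlrpc_prep_fcset(8, example_producer, arg);
 * \endcode
 */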

/**
 * Wind down and free request set structure previously allocated with
 * ptlrpc_prep_set.
 * Ensures that all requests on the set have completed and removes
 * all requests from the request list in a set.
 * If any unsent requests happen to be on the list, pretend that they got
 * an error in flight and call their completion handler.
 */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head        *tmp;
        struct list_head        *next;
        int                      expected_phase;
        int                      n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (atomic_read(&set->set_remaining) == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERTF(atomic_read(&set->set_remaining) == 0 ||
                 atomic_read(&set->set_remaining) == n, "%d / %d\n",
                 atomic_read(&set->set_remaining), n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {
                        ptlrpc_req_interpret(NULL, req, -EBADR);
                        atomic_dec(&set->set_remaining);
                }

                spin_lock(&req->rq_lock);
                req->rq_set = NULL;
                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);

                ptlrpc_req_finished(req);
        }

        LASSERT(atomic_read(&set->set_remaining) == 0);

        ptlrpc_reqset_put(set);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);

/**
 * Add a callback function \a fn to the set.
 * This function would be called when all requests on this set are completed.
 * The function will be passed \a data argument.
 */
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
                      set_interpreter_func fn, void *data)
{
        struct ptlrpc_set_cbdata *cbdata;

        OBD_ALLOC_PTR(cbdata);
        if (cbdata == NULL)
                RETURN(-ENOMEM);

        cbdata->psc_interpret = fn;
        cbdata->psc_data = data;
        list_add_tail(&cbdata->psc_item, &set->set_cblist);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_set_add_cb);

/**
 * Add a new request to the general purpose request set.
 * Assumes request reference from the caller.
 */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        LASSERT(list_empty(&req->rq_set_chain));

        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        atomic_inc(&set->set_remaining);
        req->rq_queued_time = cfs_time_current();

        if (req->rq_reqmsg != NULL)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        if (set->set_producer != NULL)
                /* If the request set has a producer callback, the RPC must be
                 * sent straight away */
                ptlrpc_send_new_req(req);
}
EXPORT_SYMBOL(ptlrpc_set_add_req);
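
/**
 * Usage sketch (illustrative, not part of this file): the common synchronous
 * pattern for a general purpose set.  ptlrpc_set_wait() lives later in this
 * file and is assumed to send everything queued on the set and block until
 * all replies arrive; the set takes over our reference on req.
 *
 * \code
 * struct ptlrpc_request_set *set;
 *
 * set = ptlrpc_prep_set();
 * if (set == NULL)
 *         return -ENOMEM;
 * ptlrpc_set_add_req(set, req);
 * rc = ptlrpc_set_wait(set);
 * ptlrpc_set_destroy(set);
 * \endcode
 */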

/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread to do any necessary processing.
 * Currently only used for ptlrpcd.
 */
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
                           struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        int count, i;

        LASSERT(req->rq_set == NULL);
        LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);

        spin_lock(&set->set_new_req_lock);
        /*
         * The set takes over the caller's request reference.
         */
        req->rq_set = set;
        req->rq_queued_time = cfs_time_current();
        list_add_tail(&req->rq_set_chain, &set->set_new_requests);
        count = atomic_inc_return(&set->set_new_count);
        spin_unlock(&set->set_new_req_lock);

        /* Only need to call wakeup once for the first entry. */
        if (count == 1) {
                wake_up(&set->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners. But
                 *      to guarantee the async RPC can be processed ASAP, we
                 *      have no other better choice. It may be fixed in the
                 *      future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);

/**
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request cannot be sent and *status is the
 * error code.  If false and status is 0, then the request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
                                   struct ptlrpc_request *req, int *status)
{
        int delay = 0;
        ENTRY;

        LASSERT(status != NULL);
        *status = 0;

        if (req->rq_ctx_init || req->rq_ctx_fini) {
                /* always allow ctx init/fini rpc go through */
        } else if (imp->imp_state == LUSTRE_IMP_NEW) {
                DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
                *status = -EIO;
        } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
                /* pings may safely race with umount */
                DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
                          D_HA : D_ERROR, req, "IMP_CLOSED ");
                *status = -EIO;
        } else if (ptlrpc_send_limit_expired(req)) {
                /* probably doesn't need to be a D_ERROR after initial testing */
                DEBUG_REQ(D_ERROR, req, "send limit expired ");
                *status = -EIO;
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                   imp->imp_state == LUSTRE_IMP_CONNECTING) {
                /* allow CONNECT even if import is invalid */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                }
        } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
                if (!imp->imp_deactive)
                        DEBUG_REQ(D_NET, req, "IMP_INVALID");
                *status = -ESHUTDOWN; /* bz 12940 */
        } else if (req->rq_import_generation != imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req wrong generation:");
                *status = -EIO;
        } else if (req->rq_send_state != imp->imp_state) {
                /* invalidate in progress - any requests should be dropped */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                } else if (imp->imp_dlm_fake || req->rq_no_delay) {
                        *status = -EWOULDBLOCK;
                } else if (req->rq_allow_replay &&
                          (imp->imp_state == LUSTRE_IMP_REPLAY ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
                           imp->imp_state == LUSTRE_IMP_RECOVER)) {
                        DEBUG_REQ(D_HA, req, "allow during recovery.\n");
                } else {
                        delay = 1;
                }
        }

        RETURN(delay);
}

/**
 * Decide if the error message regarding provided request \a req
 * should be printed to the console or not.
 * Makes its decision based on request status and other properties.
 * Returns 1 to print the error on the system console or 0 if not.
 */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
        __u32 opc;
        int err;

        LASSERT(req->rq_reqmsg != NULL);
        opc = lustre_msg_get_opc(req->rq_reqmsg);

        /* Suppress particular reconnect errors which are to be expected.  No
         * errors are suppressed for the initial connection on an import */
        if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
            (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {

                /* Suppress timed out reconnect requests */
                if (req->rq_timedout)
                        return 0;

                /* Suppress unavailable/again reconnect requests */
                err = lustre_msg_get_status(req->rq_repmsg);
                if (err == -ENODEV || err == -EAGAIN)
                        return 0;
        }

        return 1;
}

/**
 * Check request processing status.
 * Returns the status.
 */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = lustre_msg_get_status(req->rq_repmsg);
        if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
                struct obd_import *imp = req->rq_import;
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
                if (ptlrpc_console_allow(req))
                        LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
                                           " operation %s failed with %d.\n",
                                           imp->imp_obd->obd_name,
                                           libcfs_nid2str(
                                           imp->imp_connection->c_peer.nid),
                                           ll_opcode2str(opc), err);
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

/**
 * Save pre-versions of objects into request for replay.
 * Versions are obtained from server reply.
 * Used for VBR.
 */
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
        struct lustre_msg *repmsg = req->rq_repmsg;
        struct lustre_msg *reqmsg = req->rq_reqmsg;
        __u64 *versions = lustre_msg_get_versions(repmsg);
        ENTRY;

        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                return;

        LASSERT(versions);
        lustre_msg_set_versions(reqmsg, versions);
        CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
               versions[0], versions[1]);

        EXIT;
}

/**
 * Callback function called when client receives RPC reply for \a req.
 * Returns 0 on success or error code.
 * The return value would be assigned to req->rq_status by the caller
 * as request processing status.
 * This function also decides if the request needs to be saved for later replay.
 */
static int after_reply(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = req->rq_import->imp_obd;
        int rc;
        struct timeval work_start;
        long timediff;
        ENTRY;

        LASSERT(obd != NULL);
        /* repbuf must be unlinked */
        LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);

        if (req->rq_reply_truncate) {
                if (ptlrpc_no_resend(req)) {
                        DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
                                  " expected: %d, actual size: %d",
                                  req->rq_nob_received, req->rq_repbuf_len);
                        RETURN(-EOVERFLOW);
                }

                sptlrpc_cli_free_repbuf(req);
                /* Pass the required reply buffer size (including
                 * space for early reply).
                 * NB: no need to round up because alloc_repbuf
                 * will round it up */
                req->rq_replen       = req->rq_nob_received;
                req->rq_nob_received = 0;
                spin_lock(&req->rq_lock);
                req->rq_resend       = 1;
                spin_unlock(&req->rq_lock);
                RETURN(0);
        }

        /*
         * NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order.
         */
        rc = sptlrpc_cli_unwrap_reply(req);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
                RETURN(rc);
        }

        /*
         * Security layer unwrap might ask to resend this request.
         */
        if (req->rq_resend)
                RETURN(0);

        rc = unpack_reply(req);
        if (rc)
                RETURN(rc);

        /* retry indefinitely on EINPROGRESS */
        if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
            ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
                time_t  now = cfs_time_current_sec();

                DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
                req->rq_resend = 1;
                req->rq_nr_resend++;

                /* allocate new xid to avoid reply reconstruction */
                if (!req->rq_bulk) {
                        /* new xid is already allocated for bulk in
                         * ptlrpc_check_set() */
                        req->rq_xid = ptlrpc_next_xid();
                        DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
                                  "resend on EINPROGRESS");
                }

                /* Readjust the timeout for current conditions */
                ptlrpc_at_set_req_timeout(req);
                /* delay resend to give the server a chance to get ready.
                 * The delay is increased by 1s on every resend and is capped to
                 * the current request timeout (i.e. obd_timeout if AT is off,
                 * or AT service time x 125% + 5s, see at_est2timeout) */
                if (req->rq_nr_resend > req->rq_timeout)
                        req->rq_sent = now + req->rq_timeout;
                else
                        req->rq_sent = now + req->rq_nr_resend;

                RETURN(0);
        }

        do_gettimeofday(&work_start);
        timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
        if (obd->obd_svc_stats != NULL) {
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                ptlrpc_lprocfs_rpc_sent(req, timediff);
        }

        if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
            lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
                          lustre_msg_get_type(req->rq_repmsg));
                RETURN(-EPROTO);
        }

        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
        ptlrpc_at_adj_net_latency(req,
                                  lustre_msg_get_service_time(req->rq_repmsg));

        rc = ptlrpc_check_status(req);
        imp->imp_connect_error = rc;

        if (rc) {
                /*
                 * Either we've been evicted, or the server has failed for
                 * some reason. Try to reconnect, and if that fails, punt to
                 * the upcall.
                 */
                if (ll_rpc_recoverable_error(rc)) {
                        if (req->rq_send_state != LUSTRE_IMP_FULL ||
                            imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
                                RETURN(rc);
                        }
                        ptlrpc_request_handle_notconn(req);
                        RETURN(rc);
                }
        } else {
                /*
                 * Check if the server sent an slv. Do it only for RPCs
                 * with rc == 0.
                 */
                ldlm_cli_update_pool(req);
        }

        /*
         * Store transno in reqmsg for replay.
         */
        if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
                req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
                lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
        }

        if (imp->imp_replayable) {
                spin_lock(&imp->imp_lock);
                /*
                 * No point in adding already-committed requests to the replay
                 * list, we will just remove them immediately. b=9829
                 */
                if (req->rq_transno != 0 &&
                    (req->rq_transno >
                     lustre_msg_get_last_committed(req->rq_repmsg) ||
                     req->rq_replay)) {
                        /** version recovery */
                        ptlrpc_save_versions(req);
                        ptlrpc_retain_replayable_request(req, imp);
                } else if (req->rq_commit_cb != NULL &&
                           list_empty(&req->rq_replay_list)) {
                        /* NB: don't call rq_commit_cb if it's already on
                         * rq_replay_list, ptlrpc_free_committed() will call
                         * it later, see LU-3618 for details */
                        spin_unlock(&imp->imp_lock);
                        req->rq_commit_cb(req);
                        spin_lock(&imp->imp_lock);
                }

                /*
                 * Replay-enabled imports return commit-status information.
                 */
                if (lustre_msg_get_last_committed(req->rq_repmsg)) {
                        imp->imp_peer_committed_transno =
                                lustre_msg_get_last_committed(req->rq_repmsg);
                }

                ptlrpc_free_committed(imp);

                if (!list_empty(&imp->imp_replay_list)) {
                        struct ptlrpc_request *last;

                        last = list_entry(imp->imp_replay_list.prev,
                                          struct ptlrpc_request,
                                          rq_replay_list);
                        /*
                         * Requests with rq_replay stay on the list even if no
                         * commit is expected.
                         */
                        if (last->rq_transno > imp->imp_peer_committed_transno)
                                ptlrpc_pinger_commit_expected(imp);
                }

                spin_unlock(&imp->imp_lock);
        }

        RETURN(rc);
}

/**
 * Helper function to send request \a req over the network for the first time.
 * Also adjusts request phase.
 * Returns 0 on success or error code.
 */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
        struct obd_import     *imp = req->rq_import;
        int rc;
        ENTRY;

        LASSERT(req->rq_phase == RQ_PHASE_NEW);
        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
            (!req->rq_generation_set ||
             req->rq_import_generation == imp->imp_generation))
                RETURN(0);

        ptlrpc_rqphase_move(req, RQ_PHASE_RPC);

        spin_lock(&imp->imp_lock);

        if (!req->rq_generation_set)
                req->rq_import_generation = imp->imp_generation;

        if (ptlrpc_import_delay_req(imp, req, &rc)) {
                spin_lock(&req->rq_lock);
                req->rq_waiting = 1;
                spin_unlock(&req->rq_lock);

                DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
                          "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                LASSERT(list_empty(&req->rq_list));
                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                atomic_inc(&req->rq_import->imp_inflight);
                spin_unlock(&imp->imp_lock);
                RETURN(0);
        }

        if (rc != 0) {
                spin_unlock(&imp->imp_lock);
                req->rq_status = rc;
                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                RETURN(rc);
        }

        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        atomic_inc(&req->rq_import->imp_inflight);
        spin_unlock(&imp->imp_lock);

        lustre_msg_set_status(req->rq_reqmsg, current_pid());

        rc = sptlrpc_req_refresh_ctx(req, -1);
        if (rc) {
                if (req->rq_err) {
                        req->rq_status = rc;
                        RETURN(1);
                } else {
                        spin_lock(&req->rq_lock);
                        req->rq_wait_ctx = 1;
                        spin_unlock(&req->rq_lock);
                        RETURN(0);
                }
        }

        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
               " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
               imp->imp_obd->obd_uuid.uuid,
               lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               lustre_msg_get_opc(req->rq_reqmsg));

        rc = ptl_send_rpc(req, 0);
        if (rc) {
                DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);
                RETURN(rc);
        }
        RETURN(0);
}
1488
1489 static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
1490 {
1491         int remaining, rc;
1492         ENTRY;
1493
1494         LASSERT(set->set_producer != NULL);
1495
1496         remaining = atomic_read(&set->set_remaining);
1497
1498         /* populate the ->set_requests list with requests until we
1499          * reach the maximum number of RPCs in flight for this set */
1500         while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
1501                 rc = set->set_producer(set, set->set_producer_arg);
1502                 if (rc == -ENOENT) {
1503                         /* no more RPC to produce */
1504                         set->set_producer     = NULL;
1505                         set->set_producer_arg = NULL;
1506                         RETURN(0);
1507                 }
1508         }
1509
1510         RETURN((atomic_read(&set->set_remaining) - remaining));
1511 }
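
/*
 * Sketch of a request producer as consumed by ptlrpc_set_producer() above
 * (hypothetical, for illustration only): the callback is invoked until the
 * set holds set_max_inflight requests, and returns -ENOENT once it has
 * nothing left to contribute.  build_next_req() is an assumed helper, not
 * a function in this file.
 *
 *        static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *        {
 *                struct ptlrpc_request *req = build_next_req(arg);
 *
 *                if (req == NULL)
 *                        return -ENOENT;
 *                ptlrpc_set_add_req(set, req);
 *                return 0;
 *        }
 */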
1512
1513 /**
1514  * This sends any unsent RPCs in \a set and returns 1 if all are sent
1515  * and no more replies are expected.
1516  * (It is possible to get fewer replies than requests sent, e.g. due to
1517  * timed-out requests or requests that we had trouble sending out.)
1518  *
1519  * NOTE: This function contains a potential schedule point (cond_resched()).
1520  */
1521 int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
1522 {
1523         struct list_head *tmp, *next;
1524         int force_timer_recalc = 0;
1525         ENTRY;
1526
1527         if (atomic_read(&set->set_remaining) == 0)
1528                 RETURN(1);
1529
1530         list_for_each_safe(tmp, next, &set->set_requests) {
1531                 struct ptlrpc_request *req =
1532                         list_entry(tmp, struct ptlrpc_request,
1533                                    rq_set_chain);
1534                 struct obd_import *imp = req->rq_import;
1535                 int unregistered = 0;
1536                 int rc = 0;
1537
1538                 /* This schedule point is mainly for the ptlrpcd caller of this
1539                  * function.  Most ptlrpc sets are not long-lived and unbounded
1540                  * in length, but at least the set used by the ptlrpcd is.
1541                  * Since the processing time is unbounded, we need to insert an
1542                  * explicit schedule point to make the thread well-behaved.
1543                  */
1544                 cond_resched();
1545
1546                 if (req->rq_phase == RQ_PHASE_NEW &&
1547                     ptlrpc_send_new_req(req)) {
1548                         force_timer_recalc = 1;
1549                 }
1550
1551                 /* delayed send - skip */
1552                 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1553                         continue;
1554
1555                 /* delayed resend - skip */
1556                 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
1557                     req->rq_sent > cfs_time_current_sec())
1558                         continue;
1559
1560                 if (!(req->rq_phase == RQ_PHASE_RPC ||
1561                       req->rq_phase == RQ_PHASE_BULK ||
1562                       req->rq_phase == RQ_PHASE_INTERPRET ||
1563                       req->rq_phase == RQ_PHASE_UNREGISTERING ||
1564                       req->rq_phase == RQ_PHASE_COMPLETE)) {
1565                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1566                         LBUG();
1567                 }
1568
1569                 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1570                         LASSERT(req->rq_next_phase != req->rq_phase);
1571                         LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1572
1573                         /*
1574                          * Skip processing until reply is unlinked. We
1575                          * can't return to pool before that and we can't
1576                          * call interpret before that. We need to make
1577                          * sure that all rdma transfers finished and will
1578                          * not corrupt any data.
1579                          */
1580                         if (ptlrpc_client_recv_or_unlink(req) ||
1581                             ptlrpc_client_bulk_active(req))
1582                                 continue;
1583
1584                         /*
1585                          * Turn fail_loc off to prevent it from looping
1586                          * forever.
1587                          */
1588                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1589                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
1590                                                      OBD_FAIL_ONCE);
1591                         }
1592                         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1593                                 OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
1594                                                      OBD_FAIL_ONCE);
1595                         }
1596
1597                         /*
1598                          * Move to next phase if reply was successfully
1599                          * unlinked.
1600                          */
1601                         ptlrpc_rqphase_move(req, req->rq_next_phase);
1602                 }
1603
1604                 if (req->rq_phase == RQ_PHASE_COMPLETE)
1605                         continue;
1606
1607                 if (req->rq_phase == RQ_PHASE_INTERPRET)
1608                         GOTO(interpret, req->rq_status);
1609
1610                 /*
1611                  * Note that this also will start async reply unlink.
1612                  */
1613                 if (req->rq_net_err && !req->rq_timedout) {
1614                         ptlrpc_expire_one_request(req, 1);
1615
1616                         /*
1617                          * Check if we still need to wait for unlink.
1618                          */
1619                         if (ptlrpc_client_recv_or_unlink(req) ||
1620                             ptlrpc_client_bulk_active(req))
1621                                 continue;
1622                         /* If there is no need to resend, fail it now. */
1623                         if (req->rq_no_resend) {
1624                                 if (req->rq_status == 0)
1625                                         req->rq_status = -EIO;
1626                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1627                                 GOTO(interpret, req->rq_status);
1628                         } else {
1629                                 continue;
1630                         }
1631                 }
1632
1633                 if (req->rq_err) {
1634                         spin_lock(&req->rq_lock);
1635                         req->rq_replied = 0;
1636                         spin_unlock(&req->rq_lock);
1637                         if (req->rq_status == 0)
1638                                 req->rq_status = -EIO;
1639                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1640                         GOTO(interpret, req->rq_status);
1641                 }
1642
1643                 /* ptlrpc_set_wait()->l_wait_event() sets lwi_allow_intr,
1644                  * so it sets rq_intr regardless of individual RPC
1645                  * timeouts.  The synchronous IO waiting path likewise
1646                  * sets rq_intr irrespective of whether ptlrpcd
1647                  * has seen a timeout.  Our policy is to only interpret
1648                  * interrupted RPCs after they have timed out, so we
1649                  * need to enforce that here.
1650                  */
1651
1652                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
1653                                      req->rq_wait_ctx)) {
1654                         req->rq_status = -EINTR;
1655                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1656                         GOTO(interpret, req->rq_status);
1657                 }
1658
1659                 if (req->rq_phase == RQ_PHASE_RPC) {
1660                         if (req->rq_timedout || req->rq_resend ||
1661                             req->rq_waiting || req->rq_wait_ctx) {
1662                                 int status;
1663
1664                                 if (!ptlrpc_unregister_reply(req, 1)) {
1665                                         ptlrpc_unregister_bulk(req, 1);
1666                                         continue;
1667                                 }
1668
1669                                 spin_lock(&imp->imp_lock);
1670                                 if (ptlrpc_import_delay_req(imp, req, &status)){
1671                                         /* put on delay list - only if we wait
1672                                          * for recovery to finish - before send */
1673                                         list_del_init(&req->rq_list);
1674                                         list_add_tail(&req->rq_list,
1675                                                       &imp->imp_delayed_list);
1677                                         spin_unlock(&imp->imp_lock);
1678                                         continue;
1679                                 }
1680
1681                                 if (status != 0) {
1682                                         req->rq_status = status;
1683                                         ptlrpc_rqphase_move(req,
1684                                                 RQ_PHASE_INTERPRET);
1685                                         spin_unlock(&imp->imp_lock);
1686                                         GOTO(interpret, req->rq_status);
1687                                 }
1688                                 if (ptlrpc_no_resend(req) &&
1689                                     !req->rq_wait_ctx) {
1690                                         req->rq_status = -ENOTCONN;
1691                                         ptlrpc_rqphase_move(req,
1692                                                             RQ_PHASE_INTERPRET);
1693                                         spin_unlock(&imp->imp_lock);
1694                                         GOTO(interpret, req->rq_status);
1695                                 }
1696
1697                                 list_del_init(&req->rq_list);
1698                                 list_add_tail(&req->rq_list,
1699                                               &imp->imp_sending_list);
1700
1701                                 spin_unlock(&imp->imp_lock);
1702
1703                                 spin_lock(&req->rq_lock);
1704                                 req->rq_waiting = 0;
1705                                 spin_unlock(&req->rq_lock);
1706
1707                                 if (req->rq_timedout || req->rq_resend) {
1708                                         /* This is re-sending anyway,
1709                                          * so let's mark req as resend. */
1710                                         spin_lock(&req->rq_lock);
1711                                         req->rq_resend = 1;
1712                                         spin_unlock(&req->rq_lock);
1713                                         if (req->rq_bulk) {
1714                                                 __u64 old_xid;
1715
1716                                                 if (!ptlrpc_unregister_bulk(req, 1))
1717                                                         continue;
1718
1719                                                 /* ensure previous bulk fails */
1720                                                 old_xid = req->rq_xid;
1721                                                 req->rq_xid = ptlrpc_next_xid();
1722                                                 CDEBUG(D_HA, "resend bulk "
1723                                                        "old x"LPU64
1724                                                        " new x"LPU64"\n",
1725                                                        old_xid, req->rq_xid);
1726                                         }
1727                                 }
1728                                 /*
1729                                  * rq_wait_ctx is only touched by ptlrpcd,
1730                                  * so no lock is needed here.
1731                                  */
1732                                 status = sptlrpc_req_refresh_ctx(req, -1);
1733                                 if (status) {
1734                                         if (req->rq_err) {
1735                                                 req->rq_status = status;
1736                                                 spin_lock(&req->rq_lock);
1737                                                 req->rq_wait_ctx = 0;
1738                                                 spin_unlock(&req->rq_lock);
1739                                                 force_timer_recalc = 1;
1740                                         } else {
1741                                                 spin_lock(&req->rq_lock);
1742                                                 req->rq_wait_ctx = 1;
1743                                                 spin_unlock(&req->rq_lock);
1744                                         }
1745
1746                                         continue;
1747                                 } else {
1748                                         spin_lock(&req->rq_lock);
1749                                         req->rq_wait_ctx = 0;
1750                                         spin_unlock(&req->rq_lock);
1751                                 }
1752
1753                                 rc = ptl_send_rpc(req, 0);
1754                                 if (rc) {
1755                                         DEBUG_REQ(D_HA, req,
1756                                                   "send failed: rc = %d", rc);
1757                                         force_timer_recalc = 1;
1758                                         spin_lock(&req->rq_lock);
1759                                         req->rq_net_err = 1;
1760                                         spin_unlock(&req->rq_lock);
1761                                         continue;
1762                                 }
1763                                 /* need to reset the timeout */
1764                                 force_timer_recalc = 1;
1765                         }
1766
1767                         spin_lock(&req->rq_lock);
1768
1769                         if (ptlrpc_client_early(req)) {
1770                                 ptlrpc_at_recv_early_reply(req);
1771                                 spin_unlock(&req->rq_lock);
1772                                 continue;
1773                         }
1774
1775                         /* Still waiting for a reply? */
1776                         if (ptlrpc_client_recv(req)) {
1777                                 spin_unlock(&req->rq_lock);
1778                                 continue;
1779                         }
1780
1781                         /* Did we actually receive a reply? */
1782                         if (!ptlrpc_client_replied(req)) {
1783                                 spin_unlock(&req->rq_lock);
1784                                 continue;
1785                         }
1786
1787                         spin_unlock(&req->rq_lock);
1788
1789                         /* unlink from net because we are going to
1790                          * swab the reply buffer in place */
1791                         unregistered = ptlrpc_unregister_reply(req, 1);
1792                         if (!unregistered)
1793                                 continue;
1794
1795                         req->rq_status = after_reply(req);
1796                         if (req->rq_resend)
1797                                 continue;
1798
1799                         /* If there is no bulk associated with this request,
1800                          * then we're done and should let the interpreter
1801                          * process the reply. Similarly if the RPC returned
1802                          * an error, and therefore the bulk will never arrive.
1803                          */
1804                         if (req->rq_bulk == NULL || req->rq_status < 0) {
1805                                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1806                                 GOTO(interpret, req->rq_status);
1807                         }
1808
1809                         ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
1810                 }
1811
1812                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
1813                 if (ptlrpc_client_bulk_active(req))
1814                         continue;
1815
1816                 if (req->rq_bulk->bd_failure) {
1817                         /* The RPC reply arrived OK, but the bulk screwed
1818                          * up!  Dead weird, since the server told us the RPC
1819                          * was good after getting the REPLY for its GET or
1820                          * the ACK for its PUT. */
1821                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1822                         req->rq_status = -EIO;
1823                 }
1824
1825                 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1826
1827         interpret:
1828                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
1829
1830                 /* This moves the request to the "unregistering" phase if
1831                  * we still need to wait for the reply unlink. */
1832                 if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
1833                         /* start async bulk unlink too */
1834                         ptlrpc_unregister_bulk(req, 1);
1835                         continue;
1836                 }
1837
1838                 if (!ptlrpc_unregister_bulk(req, 1))
1839                         continue;
1840
1841                 /* Receiving should already be finished by the time
1842                  * interpret is called. */
1843                 LASSERT(!req->rq_receiving_reply);
1844
1845                 ptlrpc_req_interpret(env, req, req->rq_status);
1846
1847                 if (ptlrpcd_check_work(req)) {
1848                         atomic_dec(&set->set_remaining);
1849                         continue;
1850                 }
1851                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
1852
1853                 CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
1854                         "Completed RPC pname:cluuid:pid:xid:nid:"
1855                         "opc %s:%s:%d:"LPU64":%s:%d\n",
1856                         current_comm(), imp->imp_obd->obd_uuid.uuid,
1857                         lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1858                         libcfs_nid2str(imp->imp_connection->c_peer.nid),
1859                         lustre_msg_get_opc(req->rq_reqmsg));
1860
1861                 spin_lock(&imp->imp_lock);
1862                 /* The request may no longer be on the sending or delayed
1863                  * list.  This happens when ptlrpc_import_delay_req(req,
1864                  * status) found it impossible to allow sending this RPC
1865                  * and returned *status != 0, marking it erroneous. */
1866                 if (!list_empty(&req->rq_list)) {
1867                         list_del_init(&req->rq_list);
1868                         atomic_dec(&imp->imp_inflight);
1869                 }
1870                 spin_unlock(&imp->imp_lock);
1871
1872                 atomic_dec(&set->set_remaining);
1873                 wake_up_all(&imp->imp_recovery_waitq);
1874
1875                 if (set->set_producer) {
1876                         /* produce a new request if possible */
1877                         if (ptlrpc_set_producer(set) > 0)
1878                                 force_timer_recalc = 1;
1879
1880                         /* free the request that has just been completed
1881                          * in order not to pollute set->set_requests */
1882                         list_del_init(&req->rq_set_chain);
1883                         spin_lock(&req->rq_lock);
1884                         req->rq_set = NULL;
1885                         req->rq_invalid_rqset = 0;
1886                         spin_unlock(&req->rq_lock);
1887
1888                         /* record rq_status to compute the final status later */
1889                         if (req->rq_status != 0)
1890                                 set->set_rc = req->rq_status;
1891                         ptlrpc_req_finished(req);
1892                 }
1893         }
1894
1895         /* If we hit an error, we want to recover promptly. */
1896         RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
1897 }
1898 EXPORT_SYMBOL(ptlrpc_check_set);
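
/*
 * Usage sketch (illustrative only): a caller can drive a set to completion
 * by calling ptlrpc_check_set() repeatedly until set_remaining drops to
 * zero.  ptlrpc_set_wait() below does this properly by sleeping on
 * set->set_waitq with l_wait_event() instead of busy-polling:
 *
 *        while (atomic_read(&set->set_remaining) != 0) {
 *                ptlrpc_check_set(NULL, set);
 *                cond_resched();
 *        }
 */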
1899
1900 /**
1901  * Time out request \a req.  If \a async_unlink is set, do not wait
1902  * until LNet actually confirms network buffer unlinking.
1903  * Return 1 if we should give up further retrying attempts or 0 otherwise.
1904  */
1905 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
1906 {
1907         struct obd_import *imp = req->rq_import;
1908         int rc = 0;
1909         ENTRY;
1910
1911         spin_lock(&req->rq_lock);
1912         req->rq_timedout = 1;
1913         spin_unlock(&req->rq_lock);
1914
1915         DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
1916                   "/real "CFS_DURATION_T"]",
1917                   req->rq_net_err ? "failed due to network error" :
1918                      ((req->rq_real_sent == 0 ||
1919                        cfs_time_before(req->rq_real_sent, req->rq_sent) ||
1920                        cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
1921                       "timed out for sent delay" : "timed out for slow reply"),
1922                   req->rq_sent, req->rq_real_sent);
1923
1924         if (imp != NULL && obd_debug_peer_on_timeout)
1925                 LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
1926
1927         ptlrpc_unregister_reply(req, async_unlink);
1928         ptlrpc_unregister_bulk(req, async_unlink);
1929
1930         if (obd_dump_on_timeout)
1931                 libcfs_debug_dumplog();
1932
1933         if (imp == NULL) {
1934                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
1935                 RETURN(1);
1936         }
1937
1938         atomic_inc(&imp->imp_timeouts);
1939
1940         /* The DLM server doesn't want recovery run on its imports. */
1941         if (imp->imp_dlm_fake)
1942                 RETURN(1);
1943
1944         /* If this request is for recovery or other primordial tasks,
1945          * then error it out here. */
1946         if (req->rq_ctx_init || req->rq_ctx_fini ||
1947             req->rq_send_state != LUSTRE_IMP_FULL ||
1948             imp->imp_obd->obd_no_recov) {
1949                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
1950                           ptlrpc_import_state_name(req->rq_send_state),
1951                           ptlrpc_import_state_name(imp->imp_state));
1952                 spin_lock(&req->rq_lock);
1953                 req->rq_status = -ETIMEDOUT;
1954                 req->rq_err = 1;
1955                 spin_unlock(&req->rq_lock);
1956                 RETURN(1);
1957         }
1958
1959         /* if a request can't be resent we can't wait for an answer after
1960            the timeout */
1961         if (ptlrpc_no_resend(req)) {
1962                 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
1963                 rc = 1;
1964         }
1965
1966         ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
1967
1968         RETURN(rc);
1969 }
1970
1971 /**
1972  * Time out all uncompleted requests in the request set pointed to by \a data.
1973  * Callback used when waiting on sets with l_wait_event.
1974  * Always returns 1.
1975  */
1976 int ptlrpc_expired_set(void *data)
1977 {
1978         struct ptlrpc_request_set       *set = data;
1979         struct list_head                *tmp;
1980         time_t                          now = cfs_time_current_sec();
1981         ENTRY;
1982
1983         LASSERT(set != NULL);
1984
1985         /*
1986          * A timeout expired. See which reqs it applies to...
1987          */
1988         list_for_each(tmp, &set->set_requests) {
1989                 struct ptlrpc_request *req =
1990                         list_entry(tmp, struct ptlrpc_request,
1991                                    rq_set_chain);
1992
1993                 /* don't expire request waiting for context */
1994                 if (req->rq_wait_ctx)
1995                         continue;
1996
1997                 /* Request in-flight? */
1998                 if (!((req->rq_phase == RQ_PHASE_RPC &&
1999                        !req->rq_waiting && !req->rq_resend) ||
2000                       (req->rq_phase == RQ_PHASE_BULK)))
2001                         continue;
2002
2003                 if (req->rq_timedout ||     /* already dealt with */
2004                     req->rq_deadline > now) /* not expired */
2005                         continue;
2006
2007                 /* Deal with this request.  Do it asynchronously so as
2008                  * not to block the ptlrpcd thread. */
2009                 ptlrpc_expire_one_request(req, 1);
2010         }
2011
2012         /*
2013          * When waiting for a whole set, we always break out of the
2014          * sleep so we can recalculate the timeout, or enable interrupts
2015          * if everyone's timed out.
2016          */
2017         RETURN(1);
2018 }
2019 EXPORT_SYMBOL(ptlrpc_expired_set);
2020
2021 /**
2022  * Sets rq_intr flag in \a req under spinlock.
2023  */
2024 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
2025 {
2026         spin_lock(&req->rq_lock);
2027         req->rq_intr = 1;
2028         spin_unlock(&req->rq_lock);
2029 }
2030 EXPORT_SYMBOL(ptlrpc_mark_interrupted);
2031
2032 /**
2033  * Interrupts (sets interrupted flag) all uncompleted requests in
2034  * a set \a data. Callback used by l_wait_event for interruptible waits.
2035  */
2036 void ptlrpc_interrupted_set(void *data)
2037 {
2038         struct ptlrpc_request_set *set = data;
2039         struct list_head *tmp;
2040
2041         LASSERT(set != NULL);
2042         CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
2043
2044         list_for_each(tmp, &set->set_requests) {
2045                 struct ptlrpc_request *req =
2046                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2047
2048                 if (req->rq_phase != RQ_PHASE_RPC &&
2049                     req->rq_phase != RQ_PHASE_UNREGISTERING)
2050                         continue;
2051
2052                 ptlrpc_mark_interrupted(req);
2053         }
2054 }
2055 EXPORT_SYMBOL(ptlrpc_interrupted_set);
2056
2057 /**
2058  * Get the smallest timeout in the set; this does NOT set a timeout.
2059  */
2060 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
2061 {
2062         struct list_head        *tmp;
2063         time_t                   now = cfs_time_current_sec();
2064         int                      timeout = 0;
2065         struct ptlrpc_request   *req;
2066         int                      deadline;
2067         ENTRY;
2068
2069         list_for_each(tmp, &set->set_requests) {
2070                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2071
2072                 /*
2073                  * Request in-flight?
2074                  */
2075                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
2076                       (req->rq_phase == RQ_PHASE_BULK) ||
2077                       (req->rq_phase == RQ_PHASE_NEW)))
2078                         continue;
2079
2080                 /*
2081                  * Already timed out.
2082                  */
2083                 if (req->rq_timedout)
2084                         continue;
2085
2086                 /*
2087                  * Waiting for ctx.
2088                  */
2089                 if (req->rq_wait_ctx)
2090                         continue;
2091
2092                 if (req->rq_phase == RQ_PHASE_NEW)
2093                         deadline = req->rq_sent;
2094                 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
2095                         deadline = req->rq_sent;
2096                 else
2097                         deadline = req->rq_sent + req->rq_timeout;
2098
2099                 if (deadline <= now)    /* actually expired already */
2100                         timeout = 1;    /* ASAP */
2101                 else if (timeout == 0 || timeout > deadline - now)
2102                         timeout = deadline - now;
2103         }
2104         RETURN(timeout);
2105 }
2106 EXPORT_SYMBOL(ptlrpc_set_next_timeout);
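
/*
 * Usage sketch (mirrors ptlrpc_set_wait() below): the returned number of
 * seconds feeds the wait timeout, with 0 meaning nothing is in flight.
 *
 *        struct l_wait_info lwi;
 *        int timeout, rc;
 *
 *        timeout = ptlrpc_set_next_timeout(set);
 *        lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
 *                          ptlrpc_expired_set, set);
 *        rc = l_wait_event(set->set_waitq,
 *                          ptlrpc_check_set(NULL, set), &lwi);
 */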
2107
2108 /**
2109  * Send all unsent requests from the set and then wait until all
2110  * requests in the set complete (either get a reply, time out, get an
2111  * error, or otherwise be interrupted).
2112  * Returns 0 on success or an error code otherwise.
2113  */
2114 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
2115 {
2116         struct list_head            *tmp;
2117         struct ptlrpc_request *req;
2118         struct l_wait_info     lwi;
2119         int                    rc, timeout;
2120         ENTRY;
2121
2122         if (set->set_producer)
2123                 (void)ptlrpc_set_producer(set);
2124         else
2125                 list_for_each(tmp, &set->set_requests) {
2126                         req = list_entry(tmp, struct ptlrpc_request,
2127                                          rq_set_chain);
2128                         if (req->rq_phase == RQ_PHASE_NEW)
2129                                 (void)ptlrpc_send_new_req(req);
2130                 }
2131
2132         if (list_empty(&set->set_requests))
2133                 RETURN(0);
2134
2135         do {
2136                 timeout = ptlrpc_set_next_timeout(set);
2137
2138                 /* wait until all complete, interrupted, or an in-flight
2139                  * req times out */
2140                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
2141                        set, timeout);
2142
2143                 if (timeout == 0 && !cfs_signal_pending())
2144                         /*
2145                          * No requests are in-flight (either timed out
2146                          * or delayed), so we can allow interrupts.
2147                          * We still want to block for a limited time,
2148                          * so we allow interrupts during the timeout.
2149                          */
2150                         lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1), 
2151                                                    ptlrpc_expired_set,
2152                                                    ptlrpc_interrupted_set, set);
2153                 else
2154                         /*
2155                          * At least one request is in flight, so no
2156                          * interrupts are allowed. Wait until all
2157                          * complete, or an in-flight req times out. 
2158                          */
2159                         lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
2160                                           ptlrpc_expired_set, set);
2161
2162                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
2163
2164                 /* LU-769 - if we ignored the signal because it was already
2165                  * pending when we started, we need to handle it now or we risk
2166                  * it being ignored forever */
2167                 if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
2168                     cfs_signal_pending()) {
2169                         sigset_t blocked_sigs =
2170                                            cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
2171
2172                         /* In fact we only interrupt for the "fatal" signals
2173                          * like SIGINT or SIGKILL. We still ignore less
2174                          * important signals since ptlrpc set is not easily
2175                          * reentrant from userspace again */
2176                         if (cfs_signal_pending())
2177                                 ptlrpc_interrupted_set(set);
2178                         cfs_restore_sigs(blocked_sigs);
2179                 }
2180
2181                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
2182
2183                 /* -EINTR => all requests have been flagged rq_intr so next
2184                  * check completes.
2185                  * -ETIMEDOUT => someone timed out.  When all reqs have
2186                  * timed out, signals are enabled allowing completion with
2187                  * EINTR.
2188                  * I don't really care if we go once more round the loop in
2189                  * the error cases -eeb. */
2190                 if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
2191                         list_for_each(tmp, &set->set_requests) {
2192                                 req = list_entry(tmp, struct ptlrpc_request,
2193                                                  rq_set_chain);
2194                                 spin_lock(&req->rq_lock);
2195                                 req->rq_invalid_rqset = 1;
2196                                 spin_unlock(&req->rq_lock);
2197                         }
2198                 }
2199         } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
2200
2201         LASSERT(atomic_read(&set->set_remaining) == 0);
2202
2203         rc = set->set_rc; /* rq_status of already freed requests if any */
2204         list_for_each(tmp, &set->set_requests) {
2205                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2206
2207                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
2208                 if (req->rq_status != 0)
2209                         rc = req->rq_status;
2210         }
2211
2212         if (set->set_interpret != NULL) {
2213                 int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
2214                         set->set_interpret;
2215                 rc = interpreter(set, set->set_arg, rc);
2216         } else {
2217                 struct ptlrpc_set_cbdata *cbdata, *n;
2218                 int err;
2219
2220                 list_for_each_entry_safe(cbdata, n,
2221                                          &set->set_cblist, psc_item) {
2222                         list_del_init(&cbdata->psc_item);
2223                         err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
2224                         if (err && !rc)
2225                                 rc = err;
2226                         OBD_FREE_PTR(cbdata);
2227                 }
2228         }
2229
2230         RETURN(rc);
2231 }
2232 EXPORT_SYMBOL(ptlrpc_set_wait);
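
/*
 * Typical caller pattern (a sketch; assumes \a req was already allocated
 * and packed elsewhere).  This is essentially what ptlrpc_queue_wait()
 * below does for a single request:
 *
 *        struct ptlrpc_request_set *set;
 *        int rc;
 *
 *        set = ptlrpc_prep_set();
 *        if (set == NULL)
 *                return -ENOMEM;
 *        ptlrpc_set_add_req(set, req);
 *        rc = ptlrpc_set_wait(set);
 *        ptlrpc_set_destroy(set);
 */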
2233
2234 /**
2235  * Helper function for request freeing.
2236  * Called when the reference count reaches zero and the request needs to be
2237  * freed.  Removes the request from all sending/replay lists it might be on
2238  * and frees network buffers if any are present.
2239  * If \a locked is set, the caller is already holding import imp_lock
2240  * and so we no longer need to reobtain it (for certain list manipulations).
2241  */
2242 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
2243 {
2244         ENTRY;
2245         if (request == NULL) {
2246                 EXIT;
2247                 return;
2248         }
2249
2250         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
2251         LASSERTF(request->rq_rqbd == NULL, "req %p\n", request); /* client-side */
2252         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
2253         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
2254         LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
2255         LASSERTF(!request->rq_replay, "req %p\n", request);
2256
2257         req_capsule_fini(&request->rq_pill);
2258
2259         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
2260          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
2261         if (request->rq_import != NULL) {
2262                 if (!locked)
2263                         spin_lock(&request->rq_import->imp_lock);
2264                 list_del_init(&request->rq_replay_list);
2265                 if (!locked)
2266                         spin_unlock(&request->rq_import->imp_lock);
2267         }
2268         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
2269
2270         if (atomic_read(&request->rq_refcount) != 0) {
2271                 DEBUG_REQ(D_ERROR, request,
2272                           "freeing request with nonzero refcount");
2273                 LBUG();
2274         }
2275
2276         if (request->rq_repbuf != NULL)
2277                 sptlrpc_cli_free_repbuf(request);
2278         if (request->rq_export != NULL) {
2279                 class_export_put(request->rq_export);
2280                 request->rq_export = NULL;
2281         }
2282         if (request->rq_import != NULL) {
2283                 class_import_put(request->rq_import);
2284                 request->rq_import = NULL;
2285         }
2286         if (request->rq_bulk != NULL)
2287                 ptlrpc_free_bulk_pin(request->rq_bulk);
2288
2289         if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
2290                 sptlrpc_cli_free_reqbuf(request);
2291
2292         if (request->rq_cli_ctx)
2293                 sptlrpc_req_put_ctx(request, !locked);
2294
2295         if (request->rq_pool)
2296                 __ptlrpc_free_req_to_pool(request);
2297         else
2298                 ptlrpc_request_cache_free(request);
2299         EXIT;
2300 }
2301
2302 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
2303 /**
2304  * Drop one request reference. Must be called with import imp_lock held.
2305  * When the reference count drops to zero, the request is freed.
2306  */
2307 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
2308 {
2309         assert_spin_locked(&request->rq_import->imp_lock);
2310         (void)__ptlrpc_req_finished(request, 1);
2311 }
2312 EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
2313
2314 /**
2315  * Helper function.
2316  * Drops one reference for request \a request.
2317  * \a locked set indicates that the caller holds import imp_lock.
2318  * Frees the request when the reference count reaches zero.
2319  */
2320 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
2321 {
2322         ENTRY;
2323         if (request == NULL)
2324                 RETURN(1);
2325
2326         if (request == LP_POISON ||
2327             request->rq_reqmsg == LP_POISON) {
2328                 CERROR("dereferencing freed request (bug 575)\n");
2329                 LBUG();
2330                 RETURN(1);
2331         }
2332
2333         DEBUG_REQ(D_INFO, request, "refcount now %u",
2334                   atomic_read(&request->rq_refcount) - 1);
2335
2336         if (atomic_dec_and_test(&request->rq_refcount)) {
2337                 __ptlrpc_free_req(request, locked);
2338                 RETURN(1);
2339         }
2340
2341         RETURN(0);
2342 }
2343
2344 /**
2345  * Drops one reference count for a request.
2346  */
2347 void ptlrpc_req_finished(struct ptlrpc_request *request)
2348 {
2349         __ptlrpc_req_finished(request, 0);
2350 }
2351 EXPORT_SYMBOL(ptlrpc_req_finished);
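
/*
 * Reference counting sketch (illustrative only): every extra reference
 * taken with ptlrpc_request_addref() below must be balanced by a
 * ptlrpc_req_finished(); the final put frees the request via
 * __ptlrpc_free_req().
 *
 *        ptlrpc_request_addref(req);
 *        ... use req across an asynchronous operation ...
 *        ptlrpc_req_finished(req);
 */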
2352
2353 /**
2354  * Returns the xid of \a request.
2355  */
2356 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
2357 {
2358         return request->rq_xid;
2359 }
2360 EXPORT_SYMBOL(ptlrpc_req_xid);
2361
2362 /**
2363  * Disengage the client's reply buffer from the network
2364  * NB does _NOT_ unregister any client-side bulk.
2365  * IDEMPOTENT, but _not_ safe against concurrent callers.
2366  * The request owner (i.e. the thread doing the I/O) must call...
2367  * Returns 1 once the reply is unlinked, or 0 if the unlink is still pending.
2368  */
2369 int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
2370 {
2371         int                rc;
2372         struct l_wait_info lwi;
2373
2374         /*
2375          * Might sleep.
2376          */
2377         LASSERT(!in_interrupt());
2378
2379         /*
2380          * Let's setup deadline for reply unlink.
2381          */
2382         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
2383             async && request->rq_reply_deadline == 0)
2384                 request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
2385
2386         /*
2387          * Nothing left to do.
2388          */
2389         if (!ptlrpc_client_recv_or_unlink(request))
2390                 RETURN(1);
2391
2392         LNetMDUnlink(request->rq_reply_md_h);
2393
2394         /*
2395          * Let's check it once again.
2396          */
2397         if (!ptlrpc_client_recv_or_unlink(request))
2398                 RETURN(1);
2399
2400         /*
2401          * Move to "Unregistering" phase as reply was not unlinked yet.
2402          */
2403         ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
2404
2405         /*
2406          * Do not wait for unlink to finish.
2407          */
2408         if (async)
2409                 RETURN(0);
2410
2411         /*
2412          * We have to l_wait_event() whatever the result, to give liblustre
2413          * a chance to run reply_in_callback(), and to make sure we've
2414          * unlinked before returning a req to the pool.
2415          */
2416         for (;;) {
2417                 /* The wq argument is ignored by user-space wait_event macros */
2418                 wait_queue_head_t *wq = (request->rq_set != NULL) ?
2419                                         &request->rq_set->set_waitq :
2420                                         &request->rq_reply_waitq;
2421                 /* Network access will complete in finite time but the HUGE
2422                  * timeout lets us CWARN for visibility of sluggish NALs */
2423                 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
2424                                            cfs_time_seconds(1), NULL, NULL);
2425                 rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
2426                                   &lwi);
2427                 if (rc == 0) {
2428                         ptlrpc_rqphase_move(request, request->rq_next_phase);
2429                         RETURN(1);
2430                 }
2431
2432                 LASSERT(rc == -ETIMEDOUT);
2433                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
2434                           "rvcng=%d unlnk=%d/%d", request->rq_receiving_reply,
2435                           request->rq_req_unlink, request->rq_reply_unlink);
2436         }
2437         RETURN(0);
2438 }
2439 EXPORT_SYMBOL(ptlrpc_unregister_reply);
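
/*
 * Usage sketch: ptlrpcd-style callers pass async=1 and revisit the request
 * in RQ_PHASE_UNREGISTERING from ptlrpc_check_set() rather than sleeping,
 * as in this fragment modeled on the set-processing loop above:
 *
 *        if (!ptlrpc_unregister_reply(req, 1))
 *                continue;        (reply unlink still pending, retry later)
 */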
2440
2441 static void ptlrpc_free_request(struct ptlrpc_request *req)
2442 {
2443         spin_lock(&req->rq_lock);
2444         req->rq_replay = 0;
2445         spin_unlock(&req->rq_lock);
2446
2447         if (req->rq_commit_cb != NULL)
2448                 req->rq_commit_cb(req);
2449         list_del_init(&req->rq_replay_list);
2450
2451         __ptlrpc_req_finished(req, 1);
2452 }
2453
2454 /**
2455  * the request is committed and dropped from the replay list of its import
2456  */
2457 void ptlrpc_request_committed(struct ptlrpc_request *req, int force)
2458 {
2459         struct obd_import       *imp = req->rq_import;
2460
2461         spin_lock(&imp->imp_lock);
2462         if (list_empty(&req->rq_replay_list)) {
2463                 spin_unlock(&imp->imp_lock);
2464                 return;
2465         }
2466
2467         if (force || req->rq_transno <= imp->imp_peer_committed_transno)
2468                 ptlrpc_free_request(req);
2469
2470         spin_unlock(&imp->imp_lock);
2471 }
2472 EXPORT_SYMBOL(ptlrpc_request_committed);
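
/*
 * Usage sketch (hypothetical caller): when a higher layer learns that the
 * operation a replayable request belongs to has been committed, it can
 * drop that request from the import replay list directly:
 *
 *        ptlrpc_request_committed(open_req, 0);
 *
 * Passing force = 1 frees the request regardless of
 * imp_peer_committed_transno.
 */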
2473
2474 /**
2475  * Iterates through the replay_list on import and prunes
2476  * all requests that have a transno smaller than last_committed for the
2477  * import and don't have rq_replay set.
2478  * Since requests are sorted in transno order, stops at the first
2479  * transno bigger than last_committed.
2480  * Caller must hold imp->imp_lock.
2481  */
2482 void ptlrpc_free_committed(struct obd_import *imp)
2483 {
2484         struct ptlrpc_request   *req, *saved;
2485         struct ptlrpc_request   *last_req = NULL; /* temporary fire escape */
2486         bool                     skip_committed_list = true;
2487         ENTRY;
2488
2489         LASSERT(imp != NULL);
2490         assert_spin_locked(&imp->imp_lock);
2491
2492         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
2493             imp->imp_generation == imp->imp_last_generation_checked) {
2494                 CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
2495                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
2496                 RETURN_EXIT;
2497         }
2498         CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
2499                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
2500                imp->imp_generation);
2501
2502         if (imp->imp_generation != imp->imp_last_generation_checked)
2503                 skip_committed_list = false;
2504
2505         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
2506         imp->imp_last_generation_checked = imp->imp_generation;
2507
2508         list_for_each_entry_safe(req, saved, &imp->imp_replay_list,
2509                                      rq_replay_list) {
2510                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
2511                 LASSERT(req != last_req);
2512                 last_req = req;
2513
2514                 if (req->rq_transno == 0) {
2515                         DEBUG_REQ(D_EMERG, req, "zero transno during replay");
2516                         LBUG();
2517                 }
2518                 if (req->rq_import_generation < imp->imp_generation) {
2519                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
2520                         GOTO(free_req, 0);
2521                 }
2522
2523                 /* not yet committed */
2524                 if (req->rq_transno > imp->imp_peer_committed_transno) {
2525                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
2526                         break;
2527                 }
2528
2529                 if (req->rq_replay) {
2530                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
2531                         list_move_tail(&req->rq_replay_list,
2532                                            &imp->imp_committed_list);
2533                         continue;
2534                 }
2535
2536                 DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
2537                           imp->imp_peer_committed_transno);
2538 free_req:
2539                 ptlrpc_free_request(req);
2540         }
2541
2542         if (skip_committed_list)
2543                 GOTO(out, 0);
2544
2545         list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
2546                                      rq_replay_list) {
2547                 LASSERT(req->rq_transno != 0);
2548                 if (req->rq_import_generation < imp->imp_generation) {
2549                         DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
2550                         ptlrpc_free_request(req);
2551                 }
2552         }
2553 out:
2554         EXIT;
2555 }
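
/*
 * Caller sketch: imp_lock must be held, as in the after_reply() path
 * earlier in this file.
 *
 *        spin_lock(&imp->imp_lock);
 *        ptlrpc_free_committed(imp);
 *        spin_unlock(&imp->imp_lock);
 */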
2556
2557 void ptlrpc_cleanup_client(struct obd_import *imp)
2558 {
2559         ENTRY;
2560         EXIT;
2561 }
2562 EXPORT_SYMBOL(ptlrpc_cleanup_client);
2563
2564 /**
2565  * Schedule previously sent request for resend.
2566  * For bulk requests we assign a new xid (to avoid problems with
2567  * lost replies and therefore several transfers landing in the same buffer
2568  * from different sending attempts).
2569  */
2570 void ptlrpc_resend_req(struct ptlrpc_request *req)
2571 {
2572         DEBUG_REQ(D_HA, req, "going to resend");
2573         spin_lock(&req->rq_lock);
2574
2575         /* Request got a reply but is still linked to the import list.
2576            Let ptlrpc_check_set() process it. */
2577         if (ptlrpc_client_replied(req)) {
2578                 spin_unlock(&req->rq_lock);
2579                 DEBUG_REQ(D_HA, req, "it has reply, so skip it");
2580                 return;
2581         }
2582
2583         lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
2584         req->rq_status = -EAGAIN;
2585
2586         req->rq_resend = 1;
2587         req->rq_net_err = 0;
2588         req->rq_timedout = 0;
2589         if (req->rq_bulk) {
2590                 __u64 old_xid = req->rq_xid;
2591
2592                 /* ensure previous bulk fails */
2593                 req->rq_xid = ptlrpc_next_xid();
2594                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
2595                        old_xid, req->rq_xid);
2596         }
2597         ptlrpc_client_wake_req(req);
2598         spin_unlock(&req->rq_lock);
2599 }
2600 EXPORT_SYMBOL(ptlrpc_resend_req);
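
/*
 * Usage sketch (hypothetical fragment; the actual caller is the import
 * recovery code): after a reconnect, each request still on the sending
 * list can be scheduled for resend:
 *
 *        struct ptlrpc_request *req;
 *
 *        list_for_each_entry(req, &imp->imp_sending_list, rq_list)
 *                ptlrpc_resend_req(req);
 */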
2601
2602 /* XXX: this function and rq_status are currently unused */
2603 void ptlrpc_restart_req(struct ptlrpc_request *req)
2604 {
2605         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
2606         req->rq_status = -ERESTARTSYS;
2607
2608         spin_lock(&req->rq_lock);
2609         req->rq_restart = 1;
2610         req->rq_timedout = 0;
2611         ptlrpc_client_wake_req(req);
2612         spin_unlock(&req->rq_lock);
2613 }
2614 EXPORT_SYMBOL(ptlrpc_restart_req);
2615
2616 /**
2617  * Grab additional reference on a request \a req
2618  */
2619 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
2620 {
2621         ENTRY;
2622         atomic_inc(&req->rq_refcount);
2623         RETURN(req);
2624 }
2625 EXPORT_SYMBOL(ptlrpc_request_addref);
2626
2627 /**
2628  * Add a request to the import replay_list.
2629  * Must be called under imp_lock.
2630  */
2631 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2632                                       struct obd_import *imp)
2633 {
2634         struct list_head *tmp;
2635
2636         assert_spin_locked(&imp->imp_lock);
2637
2638         if (req->rq_transno == 0) {
2639                 DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
2640                 LBUG();
2641         }
2642
2643         /* clear this for new requests that were resent as well
2644            as resent replayed requests. */
2645         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2646
2647         /* don't re-add requests that have been replayed */
2648         if (!list_empty(&req->rq_replay_list))
2649                 return;
2650
2651         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
2652
2653         LASSERT(imp->imp_replayable);
2654         /* Balanced in ptlrpc_free_committed, usually. */
2655         ptlrpc_request_addref(req);
2656         list_for_each_prev(tmp, &imp->imp_replay_list) {
2657                 struct ptlrpc_request *iter = list_entry(tmp,
2658                                                          struct ptlrpc_request,
2659                                                          rq_replay_list);
2660
2661                 /* We may have duplicate transnos if we create and then
2662                  * open a file, or for closes retained to match creating
2663                  * opens, so use req->rq_xid as a secondary key.
2664                  * (See bugs 684, 685, and 428.)
2665                  * XXX no longer needed, but all opens need transnos!
2666                  */
2667                 if (iter->rq_transno > req->rq_transno)
2668                         continue;
2669
2670                 if (iter->rq_transno == req->rq_transno) {
2671                         LASSERT(iter->rq_xid != req->rq_xid);
2672                         if (iter->rq_xid > req->rq_xid)
2673                                 continue;
2674                 }
2675
2676                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
2677                 return;
2678         }
2679
2680         list_add(&req->rq_replay_list, &imp->imp_replay_list);
2681 }
2682 EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
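
/*
 * Caller sketch (mirrors the after_reply() path earlier in this file):
 * only requests carrying a transno are retained, and imp_lock must be
 * held.
 *
 *        spin_lock(&imp->imp_lock);
 *        if (req->rq_transno != 0)
 *                ptlrpc_retain_replayable_request(req, imp);
 *        spin_unlock(&imp->imp_lock);
 */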
2683
2684 /**
2685  * Send request and wait until it completes.
2686  * Returns request processing status.
2687  */
2688 int ptlrpc_queue_wait(struct ptlrpc_request *req)
2689 {
2690         struct ptlrpc_request_set *set;
2691         int rc;
2692         ENTRY;
2693
2694         LASSERT(req->rq_set == NULL);
2695         LASSERT(!req->rq_receiving_reply);
2696
2697         set = ptlrpc_prep_set();
2698         if (set == NULL) {
2699                 CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
2700                 RETURN(-ENOMEM);
2701         }
2702
2703         /* for distributed debugging */
2704         lustre_msg_set_status(req->rq_reqmsg, current_pid());
2705
2706         /* add a ref for the set (see comment in ptlrpc_set_add_req) */
2707         ptlrpc_request_addref(req);
2708         ptlrpc_set_add_req(set, req);
2709         rc = ptlrpc_set_wait(set);
2710         ptlrpc_set_destroy(set);
2711
2712         RETURN(rc);
2713 }
2714 EXPORT_SYMBOL(ptlrpc_queue_wait);
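
/*
 * Usage sketch (assumes \a req was built and packed elsewhere, e.g. with
 * ptlrpc_request_alloc_pack()):
 *
 *        int rc;
 *
 *        rc = ptlrpc_queue_wait(req);
 *        if (rc != 0)
 *                CERROR("sync RPC failed: rc = %d\n", rc);
 *        ptlrpc_req_finished(req);
 */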
2715
2716 struct ptlrpc_replay_async_args {
2717         int praa_old_state;
2718         int praa_old_status;
2719 };
2720
2721 /**
2722  * Callback used for reply processing of replayed requests.
2723  * In case of a successful reply, calls the registered replay callback.
2724  * In case of error, restarts the replay process.
2725  */
2726 static int ptlrpc_replay_interpret(const struct lu_env *env,
2727                                    struct ptlrpc_request *req,
2728                                    void * data, int rc)
2729 {
2730         struct ptlrpc_replay_async_args *aa = data;
2731         struct obd_import *imp = req->rq_import;
2732
2733         ENTRY;
2734         atomic_dec(&imp->imp_replay_inflight);
2735
2736         if (!ptlrpc_client_replied(req)) {
2737                 CERROR("request replay timed out, restarting recovery\n");
2738                 GOTO(out, rc = -ETIMEDOUT);
2739         }
2740
2741         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
2742             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
2743              lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
2744                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
2745
2746         /** VBR: check version failure */
2747         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
2748                 /** replay failed due to a version mismatch */
2749                 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
2750                 spin_lock(&imp->imp_lock);
2751                 imp->imp_vbr_failed = 1;
2752                 imp->imp_no_lock_replay = 1;
2753                 spin_unlock(&imp->imp_lock);
2754                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2755         } else {
2756                 /** The transno had better not change over replay. */
2757                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
2758                          lustre_msg_get_transno(req->rq_repmsg) ||
2759                          lustre_msg_get_transno(req->rq_repmsg) == 0,
2760                          LPX64"/"LPX64"\n",
2761                          lustre_msg_get_transno(req->rq_reqmsg),
2762                          lustre_msg_get_transno(req->rq_repmsg));
2763         }
2764
2765         spin_lock(&imp->imp_lock);
2766         /** if replaying by version then a gap occurred on the server, so don't trust locks */
2767         if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
2768                 imp->imp_no_lock_replay = 1;
2769         imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
2770         spin_unlock(&imp->imp_lock);
2771         LASSERT(imp->imp_last_replay_transno);
2772
2773         /* transaction number shouldn't be bigger than the latest replayed */
2774         if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
2775                 DEBUG_REQ(D_ERROR, req,
2776                           "Reported transno "LPU64" is bigger than the "
2777                           "replayed one: "LPU64, req->rq_transno,
2778                           lustre_msg_get_transno(req->rq_reqmsg));
2779                 GOTO(out, rc = -EINVAL);
2780         }
2781
2782         DEBUG_REQ(D_HA, req, "got rep");
2783
2784         /* let the callback do fixups, possibly including changes to the request */
2785         if (req->rq_replay_cb)
2786                 req->rq_replay_cb(req);
2787
2788         if (ptlrpc_client_replied(req) &&
2789             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
2790                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
2791                           lustre_msg_get_status(req->rq_repmsg),
2792                           aa->praa_old_status);
2793         } else {
2794                 /* Put it back for re-replay. */
2795                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2796         }
2797
2798         /*
2799          * Errors during replay can set transno to 0, but
2800          * imp_last_replay_transno shouldn't be set to 0 anyway
2801          */
2802         if (req->rq_transno == 0)
2803                 CERROR("Transno is 0 during replay!\n");
2804
2805         /* continue with recovery */
2806         rc = ptlrpc_import_recovery_state_machine(imp);
2807  out:
2808         req->rq_send_state = aa->praa_old_state;
2809
2810         if (rc != 0)
2811                 /* this replay failed, so restart recovery */
2812                 ptlrpc_connect_import(imp);
2813
2814         RETURN(rc);
2815 }
2816
2817 /**
2818  * Prepares and queues request for replay.
2819  * Adds it to ptlrpcd queue for actual sending.
2820  * Returns 0 on success.
2821  */
2822 int ptlrpc_replay_req(struct ptlrpc_request *req)
2823 {
2824         struct ptlrpc_replay_async_args *aa;
2825         ENTRY;
2826
2827         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
2828
2829         LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2830         aa = ptlrpc_req_async_args(req);
2831         memset(aa, 0, sizeof(*aa));
2832
2833         /* Prepare request to be resent with ptlrpcd */
2834         aa->praa_old_state = req->rq_send_state;
2835         req->rq_send_state = LUSTRE_IMP_REPLAY;
2836         req->rq_phase = RQ_PHASE_NEW;
2837         req->rq_next_phase = RQ_PHASE_UNDEFINED;
2838         if (req->rq_repmsg)
2839                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
2840         req->rq_status = 0;
2841         req->rq_interpret_reply = ptlrpc_replay_interpret;
2842         /* Readjust the timeout for current conditions */
2843         ptlrpc_at_set_req_timeout(req);
2844
2845         /* Tell the server the net_latency, so it can calculate how long
2846          * it should wait for the next replay */
2847         lustre_msg_set_service_time(req->rq_reqmsg,
2848                                     ptlrpc_at_get_net_latency(req));
2849         DEBUG_REQ(D_HA, req, "REPLAY");
2850
2851         atomic_inc(&req->rq_import->imp_replay_inflight);
2852         ptlrpc_request_addref(req);     /* ptlrpcd needs a ref */
2853
2854         ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
2855         RETURN(0);
2856 }
2857 EXPORT_SYMBOL(ptlrpc_replay_req);
2858
2859 /**
2860  * Aborts all in-flight requests on the sending and delayed lists of import \a imp
2861  */
2862 void ptlrpc_abort_inflight(struct obd_import *imp)
2863 {
2864         struct list_head *tmp, *n;
2865         ENTRY;
2866
2867         /* Make sure that no new requests get processed for this import.
2868          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
2869          * this flag and then putting requests on sending_list or delayed_list.
2870          */
2871         spin_lock(&imp->imp_lock);
2872
2873         /* XXX locking?  Maybe we should remove each request with the list
2874          * locked?  Also, how do we know if the requests on the list are
2875          * being freed at this time?
2876          */
2877         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
2878                 struct ptlrpc_request *req = list_entry(tmp,
2879                                                         struct ptlrpc_request,
2880                                                         rq_list);
2881
2882                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
2883
2884                 spin_lock(&req->rq_lock);
2885                 if (req->rq_import_generation < imp->imp_generation) {
2886                         req->rq_err = 1;
2887                         req->rq_status = -EIO;
2888                         ptlrpc_client_wake_req(req);
2889                 }
2890                 spin_unlock(&req->rq_lock);
2891         }
2892
2893         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
2894                 struct ptlrpc_request *req =
2895                         list_entry(tmp, struct ptlrpc_request, rq_list);
2896
2897                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
2898
2899                 spin_lock(&req->rq_lock);
2900                 if (req->rq_import_generation < imp->imp_generation) {
2901                         req->rq_err = 1;
2902                         req->rq_status = -EIO;
2903                         ptlrpc_client_wake_req(req);
2904                 }
2905                 spin_unlock(&req->rq_lock);
2906         }
2907
2908         /* Last chance to free reqs left on the replay list, but we
2909          * will still leak reqs that haven't committed.  */
2910         if (imp->imp_replayable)
2911                 ptlrpc_free_committed(imp);
2912
2913         spin_unlock(&imp->imp_lock);
2914
2915         EXIT;
2916 }
2917 EXPORT_SYMBOL(ptlrpc_abort_inflight);
2918
2919 /**
2920  * Abort all uncompleted requests in request set \a set
2921  */
2922 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
2923 {
2924         struct list_head *tmp, *pos;
2925
2926         LASSERT(set != NULL);
2927
2928         list_for_each_safe(pos, tmp, &set->set_requests) {
2929                 struct ptlrpc_request *req =
2930                         list_entry(pos, struct ptlrpc_request,
2931                                    rq_set_chain);
2932
2933                 spin_lock(&req->rq_lock);
2934                 if (req->rq_phase != RQ_PHASE_RPC) {
2935                         spin_unlock(&req->rq_lock);
2936                         continue;
2937                 }
2938
2939                 req->rq_err = 1;
2940                 req->rq_status = -EINTR;
2941                 ptlrpc_client_wake_req(req);
2942                 spin_unlock(&req->rq_lock);
2943         }
2944 }
2945
2946 static __u64 ptlrpc_last_xid;
2947 static spinlock_t ptlrpc_last_xid_lock;
2948
2949 /**
2950  * Initialize the XID for the node.  This is common among all requests on
2951  * this node, and only requires the property that it is monotonically
2952  * increasing.  It does not need to be sequential.  Since this is also used
2953  * as the RDMA match bits, it is important that a single client NOT have
2954  * the same match bits for two different in-flight requests, hence we do
2955  * NOT want to have an XID per target or similar.
2956  *
2957  * To avoid an unlikely collision between match bits after a client reboot
2958  * (which would deliver old data into the wrong RDMA buffer) initialize
2959  * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
2960  * If the time is clearly incorrect, we instead use a 62-bit random number.
2961  * In the worst case the random number will overflow 1M RPCs per second in
2962  * 9133 years, or permutations thereof.
2963  */
2964 #define YEAR_2004 (1ULL << 30)
2965 void ptlrpc_init_xid(void)
2966 {
2967         time_t now = cfs_time_current_sec();
2968
2969         spin_lock_init(&ptlrpc_last_xid_lock);
2970         if (now < YEAR_2004) {
2971                 cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
2972                 ptlrpc_last_xid >>= 2;
2973                 ptlrpc_last_xid |= (1ULL << 61);
2974         } else {
2975                 ptlrpc_last_xid = (__u64)now << 20;
2976         }
2977
2978         /* Always needs to be aligned to a power-of-two for multi-bulk BRW */
2979         CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
2980         ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
2981 }
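/*
 * Worked example for the time-based branch above: the shift by 20 bits
 * reserves 2^20 (~1M) XIDs per second of wall-clock time, matching the
 * assumed maximum RPC rate, so a rebooted client's first XID is larger
 * than any XID it could have issued before the reboot (clock permitting).
 */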
2982
2983 /**
2984  * Increases xid and returns the resulting new value to the caller.
2985  *
2986  * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
2987  * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
2988  * itself uses the last bulk xid needed, so the server can determine the
2989  * number of bulk transfers from the RPC XID and a bitmask.  The starting
2990  * xid must align to a power-of-two value.
2991  *
2992  * This is assumed to be true due to the initial ptlrpc_last_xid
2993  * value also being initialized to a power-of-two value. LU-1431
2994  */
2995 __u64 ptlrpc_next_xid(void)
2996 {
2997         __u64 next;
2998
2999         spin_lock(&ptlrpc_last_xid_lock);
3000         next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3001         ptlrpc_last_xid = next;
3002         spin_unlock(&ptlrpc_last_xid_lock);
3003
3004         return next;
3005 }
3006 EXPORT_SYMBOL(ptlrpc_next_xid);
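/*
 * Illustrative sketch (an assumption about the server side, not code from
 * this file): because each XID returned above is aligned to
 * PTLRPC_BULK_OPS_COUNT and a multi-bulk BRW RPC carries the last bulk XID
 * it needs, a server could recover the transfer count and the first bulk
 * XID as:
 *
 *         nbulk     = (rq_xid & (PTLRPC_BULK_OPS_COUNT - 1)) + 1;
 *         first_xid =  rq_xid & PTLRPC_BULK_OPS_MASK;
 */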
3007
3008 /**
3009  * Get a glimpse at what the next xid value is likely to be.
3010  * Returns the possible next xid.
3011  */
3012 __u64 ptlrpc_sample_next_xid(void)
3013 {
3014 #if BITS_PER_LONG == 32
3015         /* need to avoid possible word tearing on 32-bit systems */
3016         __u64 next;
3017
3018         spin_lock(&ptlrpc_last_xid_lock);
3019         next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3020         spin_unlock(&ptlrpc_last_xid_lock);
3021
3022         return next;
3023 #else
3024         /* No need to lock, since the returned value is racy anyway */
3025         return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
3026 #endif
3027 }
3028 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
3029
3030 /**
3031  * Functions for operating ptlrpc workers.
3032  *
3033  * A ptlrpc work is a function which runs inside the ptlrpc context.
3034  * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
3035  *
3036  * 1. after a work is created, it can be used many times, that is:
3037  *         handler = ptlrpcd_alloc_work();
3038  *         ptlrpcd_queue_work();
3039  *
3040  *    queue it again when necessary:
3041  *         ptlrpcd_queue_work();
3042  *         ptlrpcd_destroy_work();
3043  * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently,
3044  *    but the work will only be queued once at any time. Also, as its name
3045  *    implies, there may be a delay before it is actually run by a ptlrpcd thread.
3046  */
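/*
 * Fuller sketch of the lifecycle above (hypothetical caller, for
 * illustration only; my_cb and my_data are assumed names). The callback
 * must not sleep since it runs in ptlrpcd context:
 *
 *         static int my_cb(const struct lu_env *env, void *data)
 *         {
 *                 return 0;
 *         }
 *
 *         handler = ptlrpcd_alloc_work(imp, my_cb, my_data);
 *         if (!IS_ERR(handler)) {
 *                 ptlrpcd_queue_work(handler);
 *                 ...
 *                 ptlrpcd_destroy_work(handler);
 *         }
 */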
3047 struct ptlrpc_work_async_args {
3048         int   (*cb)(const struct lu_env *, void *);
3049         void   *cbdata;
3050 };
3051
3052 static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
3053 {
3054         /* re-initialize the req */
3055         req->rq_timeout         = obd_timeout;
3056         req->rq_sent            = cfs_time_current_sec();
3057         req->rq_deadline        = req->rq_sent + req->rq_timeout;
3058         req->rq_reply_deadline  = req->rq_deadline;
3059         req->rq_phase           = RQ_PHASE_INTERPRET;
3060         req->rq_next_phase      = RQ_PHASE_COMPLETE;
3061         req->rq_xid             = ptlrpc_next_xid();
3062         req->rq_import_generation = req->rq_import->imp_generation;
3063
3064         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3065 }
3066
3067 static int work_interpreter(const struct lu_env *env,
3068                             struct ptlrpc_request *req, void *data, int rc)
3069 {
3070         struct ptlrpc_work_async_args *arg = data;
3071
3072         LASSERT(ptlrpcd_check_work(req));
3073         LASSERT(arg->cb != NULL);
3074
3075         rc = arg->cb(env, arg->cbdata);
3076
3077         list_del_init(&req->rq_set_chain);
3078         req->rq_set = NULL;
3079
3080         if (atomic_dec_return(&req->rq_refcount) > 1) {
3081                 atomic_set(&req->rq_refcount, 2);
3082                 ptlrpcd_add_work_req(req);
3083         }
3084         return rc;
3085 }
3086
3087 static int worker_format;
3088
3089 static int ptlrpcd_check_work(struct ptlrpc_request *req)
3090 {
3091         return req->rq_pill.rc_fmt == (void *)&worker_format;
3092 }
3093
3094 /**
3095  * Create a work for ptlrpc.
3096  */
3097 void *ptlrpcd_alloc_work(struct obd_import *imp,
3098                          int (*cb)(const struct lu_env *, void *), void *cbdata)
3099 {
3100         struct ptlrpc_request         *req = NULL;
3101         struct ptlrpc_work_async_args *args;
3102         ENTRY;
3103
3104         might_sleep();
3105
3106         if (cb == NULL)
3107                 RETURN(ERR_PTR(-EINVAL));
3108
3109         /* copy some code from deprecated fakereq. */
3110         req = ptlrpc_request_cache_alloc(GFP_NOFS);
3111         if (req == NULL) {
3112                 CERROR("ptlrpc: out of memory!\n");
3113                 RETURN(ERR_PTR(-ENOMEM));
3114         }
3115
3116         req->rq_send_state = LUSTRE_IMP_FULL;
3117         req->rq_type = PTL_RPC_MSG_REQUEST;
3118         req->rq_import = class_import_get(imp);
3119         req->rq_export = NULL;
3120         req->rq_interpret_reply = work_interpreter;
3121         /* don't want reply */
3122         req->rq_receiving_reply = 0;
3123         req->rq_req_unlink = req->rq_reply_unlink = 0;
3124         req->rq_no_delay = req->rq_no_resend = 1;
3125         req->rq_pill.rc_fmt = (void *)&worker_format;
3126
3127         spin_lock_init(&req->rq_lock);
3128         INIT_LIST_HEAD(&req->rq_list);
3129         INIT_LIST_HEAD(&req->rq_replay_list);
3130         INIT_LIST_HEAD(&req->rq_set_chain);
3131         INIT_LIST_HEAD(&req->rq_history_list);
3132         INIT_LIST_HEAD(&req->rq_exp_list);
3133         init_waitqueue_head(&req->rq_reply_waitq);
3134         init_waitqueue_head(&req->rq_set_waitq);
3135         atomic_set(&req->rq_refcount, 1);
3136
3137         CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
3138         args = ptlrpc_req_async_args(req);
3139         args->cb     = cb;
3140         args->cbdata = cbdata;
3141
3142         RETURN(req);
3143 }
3144 EXPORT_SYMBOL(ptlrpcd_alloc_work);
3145
3146 void ptlrpcd_destroy_work(void *handler)
3147 {
3148         struct ptlrpc_request *req = handler;
3149
3150         if (req)
3151                 ptlrpc_req_finished(req);
3152 }
3153 EXPORT_SYMBOL(ptlrpcd_destroy_work);
3154
3155 int ptlrpcd_queue_work(void *handler)
3156 {
3157         struct ptlrpc_request *req = handler;
3158
3159         /*
3160          * Check if the req is already being queued.
3161          *
3162          * Here comes a trick: ptlrpc lacks a reliable way of checking
3163          * whether a req is being processed, so the refcount of the req is
3164          * used for this purpose. This is okay because the caller should
3165          * treat this req as opaque data. - Jinshan
3166          */
3167         LASSERT(atomic_read(&req->rq_refcount) > 0);
3168         if (atomic_inc_return(&req->rq_refcount) == 2)
3169                 ptlrpcd_add_work_req(req);
3170         return 0;
3171 }
3172 EXPORT_SYMBOL(ptlrpcd_queue_work);
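/*
 * Refcount states implied by the queueing trick above (an illustration
 * derived from work_interpreter() and ptlrpcd_queue_work(), not a
 * normative contract):
 *
 *         refcount == 1:  work allocated but idle
 *         refcount == 2:  work queued (or currently running) exactly once
 *         refcount  > 2:  queued again while running; work_interpreter()
 *                         resets it to 2 and requeues the request
 */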