[fs/lustre-release.git] lustre/ptlrpc/client.c (branch HEAD)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <errno.h>
#include <signal.h>
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <lustre_import.h>
#include <lustre_req_layout.h>

#include "ptlrpc_internal.h"

void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}
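
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a client obd typically wires its portals up once at setup time.  The
 * portal constants are real (lustre_idl.h); the wrapper function and
 * the choice of name string here are hypothetical.
 */
static void example_client_setup(struct obd_device *obd)
{
        ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
                           "osc-example", &obd->obd_ldlm_client);
}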

struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        lnet_nid_t                self;
        lnet_process_id_t         peer;
        int                       err;

        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_connection_get(peer, self, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}

static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        cfs_waitq_init(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_md_h = LNET_INVALID_HANDLE;
        desc->bd_portal = portal;
        desc->bd_type = type;

        return desc;
}

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              int npages, int type, int portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
        desc = new_bulk(npages, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = client_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* This makes req own desc, which is freed when req itself is freed */
        req->rq_bulk = desc;

        return desc;
}

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              int npages, int type, int portal)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = new_bulk(npages, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        return desc;
}

void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                           cfs_page_t *page, int pageoffset, int len)
{
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= CFS_PAGE_SIZE);

        desc->bd_nob += len;

        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
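
/*
 * Usage sketch (editorial illustration): attaching a bulk write buffer
 * to an OST request.  BULK_GET_SOURCE and OST_BULK_PORTAL are the real
 * constants used for client-side writes; the wrapper itself is
 * hypothetical, and full-page transfers are assumed.
 */
static struct ptlrpc_bulk_desc *
example_attach_write_bulk(struct ptlrpc_request *req,
                          cfs_page_t **pages, int npages)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        desc = ptlrpc_prep_bulk_imp(req, npages, BULK_GET_SOURCE,
                                    OST_BULK_PORTAL);
        if (desc == NULL)
                return NULL;

        for (i = 0; i < npages; i++)
                ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);

        return desc;
}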

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(!desc->bd_network_rw);            /* network hands off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        sptlrpc_enc_pool_put_pages(desc);

        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
                                bd_iov[desc->bd_max_iov]));
        EXIT;
}

/* Set server timelimit for this req */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
        __u32 serv_est;
        int idx;
        struct imp_at *at;

        LASSERT(req->rq_import);

        if (AT_OFF) {
                /* non-AT settings */
                req->rq_timeout = req->rq_import->imp_server_timeout ?
                        obd_timeout / 2 : obd_timeout;
                lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
                return;
        }

        at = &req->rq_import->imp_at;
        idx = import_at_get_index(req->rq_import,
                                  req->rq_request_portal);
        serv_est = at_get(&at->iat_service_estimate[idx]);
        /* add an arbitrary minimum: 125% +5 sec */
        req->rq_timeout = serv_est + (serv_est >> 2) + 5;
        /* We could get even fancier here, using history to predict increased
           loading... */

        /* Let the server know what this RPC timeout is by putting it in the
           reqmsg */
        lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
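
/*
 * Worked example (editorial note): with an adaptive service estimate of
 * serv_est = 20s, the formula above yields rq_timeout = 20 + (20 >> 2) + 5
 * = 30s, i.e. 125% of the estimate plus 5 seconds of slack.
 */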

/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
                                  unsigned int serv_est)
{
        int idx;
        unsigned int oldse;
        struct imp_at *at;

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        idx = import_at_get_index(req->rq_import, req->rq_request_portal);
        /* max service estimates are tracked on the server side,
           so just keep minimal history here */
        oldse = at_add(&at->iat_service_estimate[idx], serv_est);
        if (oldse != 0)
                CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       req->rq_request_portal,
                       oldse, at_get(&at->iat_service_estimate[idx]));
}

/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
        return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}

/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                                      unsigned int service_time)
{
        unsigned int nl, oldnl;
        struct imp_at *at;
        time_t now = cfs_time_current_sec();

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        /* Network latency is total time less server processing time */
        nl = max_t(int, now - req->rq_sent - service_time, 0) +
             1 /* st rounding */;
        if (service_time > now - req->rq_sent + 3 /* bz16408 */)
                CWARN("Reported service time %u > total measured time "
                      CFS_DURATION_T"\n", service_time,
                      cfs_time_sub(now, req->rq_sent));

        oldnl = at_add(&at->iat_net_latency, nl);
        if (oldnl != 0)
                CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       obd_uuid2str(
                               &req->rq_import->imp_connection->c_remote_uuid),
                       oldnl, at_get(&at->iat_net_latency));
}
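
/*
 * Worked example (editorial note): if the reply arrives 15s after
 * rq_sent and the server reports service_time = 10s, the latency sample
 * is nl = (15 - 10) + 1 = 6s; a reported service time above 18s
 * (15 + 3) would trigger the bz16408 warning above instead.
 */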

static int unpack_reply(struct ptlrpc_request *req)
{
        int rc;

        /* Clear reply swab mask; we may have already swabbed an early reply */
        req->rq_rep_swab_mask = 0;

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
                return(-EPROTO);
        }

        rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
                return(-EPROTO);
        }
        return 0;
}

/*
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_request *early_req;
        time_t                 olddl;
        int                    rc;
        ENTRY;

        req->rq_early = 0;
        spin_unlock(&req->rq_lock);

        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        rc = unpack_reply(early_req);
        if (rc == 0) {
                /* Expecting to increase the service time estimate here */
                ptlrpc_at_adj_service(req,
                        lustre_msg_get_timeout(early_req->rq_repmsg));
                ptlrpc_at_adj_net_latency(req,
                        lustre_msg_get_service_time(early_req->rq_repmsg));
        }

        sptlrpc_cli_finish_early_reply(early_req);

        spin_lock(&req->rq_lock);

        if (rc == 0) {
                /* Adjust the local timeout for this req */
                ptlrpc_at_set_req_timeout(req);

                olddl = req->rq_deadline;
                /* server assumes it now has rq_timeout from when it sent the
                   early reply, so client should give it at least that long. */
                req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
                            ptlrpc_at_get_net_latency(req);

                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply #%d, new deadline in "CFS_DURATION_T"s "
                          "("CFS_DURATION_T"s)", req->rq_early_count,
                          cfs_time_sub(req->rq_deadline,
                                       cfs_time_current_sec()),
                          cfs_time_sub(req->rq_deadline, olddl));
        }

        RETURN(rc);
}

void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
        struct list_head *l, *tmp;
        struct ptlrpc_request *req;

        if (!pool)
                return;

        list_for_each_safe(l, tmp, &pool->prp_req_list) {
                req = list_entry(l, struct ptlrpc_request, rq_list);
                list_del(&req->rq_list);
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
                OBD_FREE(req->rq_reqbuf, pool->prp_rq_size);
                OBD_FREE(req, sizeof(*req));
        }
        OBD_FREE(pool, sizeof(*pool));
}

void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
        int i;
        int size = 1;

        while (size < pool->prp_rq_size + SPTLRPC_MAX_PAYLOAD)
                size <<= 1;

        LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
                 "Trying to change pool size with nonempty pool "
                 "from %d to %d bytes\n", pool->prp_rq_size, size);

        spin_lock(&pool->prp_lock);
        pool->prp_rq_size = size;
        for (i = 0; i < num_rq; i++) {
                struct ptlrpc_request *req;
                struct lustre_msg *msg;

                spin_unlock(&pool->prp_lock);
                OBD_ALLOC(req, sizeof(struct ptlrpc_request));
                if (!req)
                        return;
                OBD_ALLOC_GFP(msg, size, CFS_ALLOC_STD);
                if (!msg) {
                        OBD_FREE(req, sizeof(struct ptlrpc_request));
                        return;
                }
                req->rq_reqbuf = msg;
                req->rq_reqbuf_len = size;
                req->rq_pool = pool;
                spin_lock(&pool->prp_lock);
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
        spin_unlock(&pool->prp_lock);
        return;
}

struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int num_rq, int msgsize,
                                                void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
        struct ptlrpc_request_pool *pool;

        OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
        if (!pool)
                return NULL;

        /* Request the next power of two for the allocation, because
           internally the kernel would do exactly this anyway */

        spin_lock_init(&pool->prp_lock);
        CFS_INIT_LIST_HEAD(&pool->prp_req_list);
        pool->prp_rq_size = msgsize;
        pool->prp_populate = populate_pool;

        populate_pool(pool, num_rq);

        if (list_empty(&pool->prp_req_list)) {
                /* have not allocated a single request for the pool */
                OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
                pool = NULL;
        }
        return pool;
}
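
/*
 * Usage sketch (editorial illustration): creating a small emergency
 * pool, in the style of the OSC writeout pool.  OST_MAXREQSIZE is a
 * real constant; the request count of 4 is made up.
 */
static struct ptlrpc_request_pool *example_make_rq_pool(void)
{
        /* four pre-allocated requests, OST_MAXREQSIZE bytes of msg each */
        return ptlrpc_init_rq_pool(4, OST_MAXREQSIZE,
                                   ptlrpc_add_rqs_to_pool);
}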

static struct ptlrpc_request *ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        struct lustre_msg *reqbuf;

        if (!pool)
                return NULL;

        spin_lock(&pool->prp_lock);

        /* See if we have anything in the pool, and bail out if there is
         * nothing.  In the writeout path, where this matters, this is safe
         * to do: nothing is lost in that case, and when some in-flight
         * requests complete, this code will be called again. */
        if (unlikely(list_empty(&pool->prp_req_list))) {
                spin_unlock(&pool->prp_lock);
                return NULL;
        }

        request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
                             rq_list);
        list_del(&request->rq_list);
        spin_unlock(&pool->prp_lock);

        LASSERT(request->rq_reqbuf);
        LASSERT(request->rq_pool);

        reqbuf = request->rq_reqbuf;
        memset(request, 0, sizeof(*request));
        request->rq_reqbuf = reqbuf;
        request->rq_reqbuf_len = pool->prp_rq_size;
        request->rq_pool = pool;

        return request;
}

static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
        struct ptlrpc_request_pool *pool = request->rq_pool;

        spin_lock(&pool->prp_lock);
        LASSERT(list_empty(&request->rq_list));
        list_add_tail(&request->rq_list, &pool->prp_req_list);
        spin_unlock(&pool->prp_lock);
}

static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                                      __u32 version, int opcode,
                                      int count, __u32 *lengths, char **bufs,
                                      struct ptlrpc_cli_ctx *ctx)
{
        struct obd_import  *imp = request->rq_import;
        int                 rc;
        ENTRY;

        if (unlikely(ctx))
                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
        else {
                rc = sptlrpc_req_get_ctx(request);
                if (rc)
                        GOTO(out_free, rc);
        }

        sptlrpc_req_set_flavor(request, opcode);

        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
                                 lengths, bufs);
        if (rc) {
                LASSERT(!request->rq_pool);
                GOTO(out_ctx, rc);
        }

        lustre_msg_add_version(request->rq_reqmsg, version);
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_export = NULL;

        request->rq_req_cbid.cbid_fn  = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;

        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
        request->rq_reply_cbid.cbid_arg = request;

        request->rq_phase = RQ_PHASE_NEW;

        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        ptlrpc_at_set_req_timeout(request);

        spin_lock_init(&request->rq_lock);
        CFS_INIT_LIST_HEAD(&request->rq_list);
        CFS_INIT_LIST_HEAD(&request->rq_timed_list);
        CFS_INIT_LIST_HEAD(&request->rq_replay_list);
        CFS_INIT_LIST_HEAD(&request->rq_mod_list);
        CFS_INIT_LIST_HEAD(&request->rq_ctx_chain);
        CFS_INIT_LIST_HEAD(&request->rq_set_chain);
        CFS_INIT_LIST_HEAD(&request->rq_history_list);
        cfs_waitq_init(&request->rq_reply_waitq);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        lustre_msg_set_opc(request->rq_reqmsg, opcode);

        RETURN(0);
out_ctx:
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
        class_import_put(imp);
        return rc;
}

int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                             __u32 version, int opcode, char **bufs,
                             struct ptlrpc_cli_ctx *ctx)
{
        int count;

        count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
        return __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                          request->rq_pill.rc_area[RCL_CLIENT],
                                          bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);

int ptlrpc_request_pack(struct ptlrpc_request *request,
                        __u32 version, int opcode)
{
        return ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
}

static inline
struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
                                              struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request = NULL;

        if (pool)
                request = ptlrpc_prep_req_from_pool(pool);

        if (!request)
                OBD_ALLOC_PTR(request);

        if (request) {
                LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
                LASSERT(imp != LP_POISON);
                LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
                         imp->imp_client);
                LASSERT(imp->imp_client != LP_POISON);

                request->rq_import = class_import_get(imp);
        } else {
                CERROR("request allocation out of memory\n");
        }

        return request;
}

static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
                              struct ptlrpc_request_pool *pool,
                              const struct req_format *format)
{
        struct ptlrpc_request *request;

        request = __ptlrpc_request_alloc(imp, pool);
        if (request == NULL)
                return NULL;

        req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
        req_capsule_set(&request->rq_pill, format);
        return request;
}

struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, NULL, format);
}

struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
                                            struct ptlrpc_request_pool *pool,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, pool, format);
}

void ptlrpc_request_free(struct ptlrpc_request *request)
{
        if (request->rq_pool)
                __ptlrpc_free_req_to_pool(request);
        else
                OBD_FREE_PTR(request);
}

struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
                                                 const struct req_format *format,
                                                 __u32 version, int opcode)
{
        struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
        int                    rc;

        if (req) {
                rc = ptlrpc_request_pack(req, version, opcode);
                if (rc) {
                        ptlrpc_request_free(req);
                        req = NULL;
                }
        }
        return req;
}
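
/*
 * Usage sketch (editorial illustration): allocating and packing a
 * statfs RPC in one call.  RQF_OST_STATFS, LUSTRE_OST_VERSION and
 * OST_STATFS exist elsewhere in the tree; the wrapper is hypothetical.
 */
static struct ptlrpc_request *example_statfs_req(struct obd_import *imp)
{
        return ptlrpc_request_alloc_pack(imp, &RQF_OST_STATFS,
                                         LUSTRE_OST_VERSION, OST_STATFS);
}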

struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
                     __u32 version, int opcode,
                     int count, __u32 *lengths, char **bufs,
                     struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        int                    rc;

        request = __ptlrpc_request_alloc(imp, pool);
        if (!request)
                return NULL;

        rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                        lengths, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(request);
                request = NULL;
        }
        return request;
}

struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
                __u32 *lengths, char **bufs)
{
        return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
                                    NULL);
}

struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;

        ENTRY;
        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        CFS_INIT_LIST_HEAD(&set->set_requests);
        cfs_waitq_init(&set->set_waitq);
        set->set_remaining = 0;
        spin_lock_init(&set->set_new_req_lock);
        CFS_INIT_LIST_HEAD(&set->set_new_requests);
        CFS_INIT_LIST_HEAD(&set->set_cblist);

        RETURN(set);
}
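
/*
 * Usage sketch (editorial illustration): issuing several prepared
 * requests in parallel through one set.  ptlrpc_set_wait() is the real
 * synchronous driver declared in lustre_net.h; the wrapper and its
 * calling convention are hypothetical.
 */
static int example_parallel_rpcs(struct ptlrpc_request **reqs, int n)
{
        struct ptlrpc_request_set *set;
        int i, rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        for (i = 0; i < n; i++)
                ptlrpc_set_add_req(set, reqs[i]);   /* set takes the refs */

        rc = ptlrpc_set_wait(set);                  /* send and wait */
        ptlrpc_set_destroy(set);                    /* drops the requests */
        return rc;
}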

/* Finish with this set; opposite of prep_set. */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct list_head *next;
        int               expected_phase;
        int               n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (set->set_remaining == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERT(set->set_remaining == 0 || set->set_remaining == n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {

                        if (req->rq_interpret_reply != NULL) {
                                int (*interpreter)(struct ptlrpc_request *,
                                                   void *, int) =
                                        req->rq_interpret_reply;

                                /* higher level (i.e. LOV) failed;
                                 * let the sub reqs clean up */
                                req->rq_status = -EBADR;
                                interpreter(req, &req->rq_async_args,
                                            req->rq_status);
                        }
                        set->set_remaining--;
                }

                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }

        LASSERT(set->set_remaining == 0);

        OBD_FREE(set, sizeof(*set));
        EXIT;
}

int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
                      set_interpreter_func fn, void *data)
{
        struct ptlrpc_set_cbdata *cbdata;

        OBD_ALLOC_PTR(cbdata);
        if (cbdata == NULL)
                RETURN(-ENOMEM);

        cbdata->psc_interpret = fn;
        cbdata->psc_data = data;
        list_add_tail(&cbdata->psc_item, &set->set_cblist);

        RETURN(0);
}

void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        set->set_remaining++;

        atomic_inc(&req->rq_import->imp_inflight);
}

/**
 * Lock the new-request list so that many callers can add requests; the
 * context that owns the set is expected to notice these and move them
 * into the set proper.
 */
int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
                           struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = pc->pc_set;

        /*
         * Let caller know that we stopped and will not handle this request.
         * It needs to take care of the request itself.
         */
        if (test_bit(LIOD_STOP, &pc->pc_flags))
                return -EALREADY;

        spin_lock(&set->set_new_req_lock);
        /*
         * The set takes over the caller's request reference.
         */
        list_add_tail(&req->rq_set_chain, &set->set_new_requests);
        req->rq_set = set;
        spin_unlock(&set->set_new_req_lock);

        /*
         * Let the thread know that we added something; it had better wake
         * up and process it.
         */
        cfs_waitq_signal(&set->set_waitq);
        return 0;
}

/*
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request cannot be sent and *status is the
 * error code.  If false and status is 0, then the request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
                                   struct ptlrpc_request *req, int *status)
{
        int delay = 0;
        ENTRY;

        LASSERT(status != NULL);
        *status = 0;

        if (req->rq_ctx_init || req->rq_ctx_fini) {
                /* always allow ctx init/fini rpc go through */
        } else if (imp->imp_state == LUSTRE_IMP_NEW) {
                DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
                *status = -EIO;
                LBUG();
        } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
                DEBUG_REQ(D_ERROR, req, "IMP_CLOSED");
                *status = -EIO;
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                   imp->imp_state == LUSTRE_IMP_CONNECTING) {
                /* allow CONNECT even if import is invalid */ ;
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                }
        } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) ||
                   imp->imp_obd->obd_no_recov) {
                /* If the import has been invalidated (such as by an OST
                 * failure), and if the import (MGC) tried all of its
                 * connection list (Bug 13464), the request must fail with
                 * -ESHUTDOWN.  This indicates the requests should be
                 * discarded; an -EIO may result in a resend of the request. */
                if (!imp->imp_deactive)
                        DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
                *status = -ESHUTDOWN; /* bz 12940 */
        } else if (req->rq_import_generation != imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req wrong generation:");
                *status = -EIO;
        } else if (req->rq_send_state != imp->imp_state) {
                /* invalidate in progress - any requests should be dropped */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                } else if (imp->imp_dlm_fake || req->rq_no_delay) {
                        *status = -EWOULDBLOCK;
                } else {
                        delay = 1;
                }
        }

        RETURN(delay);
}
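
/*
 * Examples of the policy above (editorial note): a normal request with
 * rq_send_state == LUSTRE_IMP_FULL sent while the import is recovering
 * is delayed (return 1, *status == 0); the same request on a fake DLM
 * import, or with rq_no_delay set, instead fails immediately with
 * *status == -EWOULDBLOCK.
 */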

static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;
        ENTRY;

        /* serialise with network callback */
        spin_lock(&req->rq_lock);

        if (req->rq_replied)
                GOTO(out, rc = 1);

        if (req->rq_net_err && !req->rq_timedout) {
                spin_unlock(&req->rq_lock);
                rc = ptlrpc_expire_one_request(req);
                spin_lock(&req->rq_lock);
                GOTO(out, rc);
        }

        if (req->rq_err)
                GOTO(out, rc = 1);

        if (req->rq_resend)
                GOTO(out, rc = 1);

        if (req->rq_restart)
                GOTO(out, rc = 1);

        if (req->rq_early) {
                ptlrpc_at_recv_early_reply(req);
                GOTO(out, rc = 0); /* keep waiting */
        }

        EXIT;
 out:
        spin_unlock(&req->rq_lock);
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        return rc;
}

static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = lustre_msg_get_status(req->rq_repmsg);
        if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
                struct obd_import *imp = req->rq_import;
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
                LCONSOLE_ERROR_MSG(0x011, "an error occurred while "
                                "communicating with %s. The %s operation "
                                "failed with %d\n",
                                libcfs_nid2str(imp->imp_connection->c_peer.nid),
                                ll_opcode2str(opc), err);
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

/**
 * Callback function called when client receives RPC reply for \a req.
 */
static int after_reply(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = req->rq_import->imp_obd;
        int rc;
        struct timeval work_start;
        long timediff;
        ENTRY;

        LASSERT(!req->rq_receiving_reply);
        LASSERT(obd);
        LASSERT(req->rq_nob_received <= req->rq_repbuf_len);

        /*
         * NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order.
         */

        rc = sptlrpc_cli_unwrap_reply(req);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
                RETURN(rc);
        }

        /*
         * Security layer unwrap might ask to resend this request.
         */
        if (req->rq_resend)
                RETURN(0);

        rc = unpack_reply(req);
        if (rc)
                RETURN(rc);

        do_gettimeofday(&work_start);
        timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);

        if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
            lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
                          lustre_msg_get_type(req->rq_repmsg));
                RETURN(-EPROTO);
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
        ptlrpc_at_adj_net_latency(req,
                                  lustre_msg_get_service_time(req->rq_repmsg));

        rc = ptlrpc_check_status(req);
        imp->imp_connect_error = rc;

        if (rc) {
                /*
                 * Either we've been evicted, or the server has failed for
                 * some reason. Try to reconnect, and if that fails, punt to
                 * the upcall.
                 */
                if (ll_rpc_recoverable_error(rc)) {
                        if (req->rq_send_state != LUSTRE_IMP_FULL ||
                            imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
                                RETURN(rc);
                        }
                        ptlrpc_request_handle_notconn(req);
                        RETURN(rc);
                }
        } else {
                /*
                 * Let's look if server sent slv. Do it only for RPC with
                 * rc == 0.
                 */
                ldlm_cli_update_pool(req);
        }

        /*
         * Store transno in reqmsg for replay.
         */
        req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
        lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);

        if (req->rq_import->imp_replayable) {
                spin_lock(&imp->imp_lock);
                /*
                 * No point in adding already-committed requests to the replay
                 * list, we will just remove them immediately. b=9829
                 */
                if (req->rq_transno != 0 &&
                    (req->rq_transno >
                     lustre_msg_get_last_committed(req->rq_repmsg) ||
                     req->rq_replay))
                        ptlrpc_retain_replayable_request(req, imp);
                else if (req->rq_commit_cb != NULL) {
                        spin_unlock(&imp->imp_lock);
                        req->rq_commit_cb(req);
                        spin_lock(&imp->imp_lock);
                }

                /*
                 * Replay-enabled imports return commit-status information.
                 */
                if (lustre_msg_get_last_committed(req->rq_repmsg)) {
                        imp->imp_peer_committed_transno =
                                lustre_msg_get_last_committed(req->rq_repmsg);
                }
                ptlrpc_free_committed(imp);
                spin_unlock(&imp->imp_lock);
        }

        RETURN(rc);
}
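
/*
 * Worked example (editorial note): if a reply carries transno 102 while
 * its last_committed field is 100, the request is retained on the
 * replay list above; had the transno been <= 100 the server already
 * committed it, so the commit callback (if any) runs immediately.
 */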

static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
        struct obd_import     *imp;
        int rc;
        ENTRY;

        LASSERT(req->rq_phase == RQ_PHASE_NEW);
        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()))
                RETURN(0);

        req->rq_phase = RQ_PHASE_RPC;

        imp = req->rq_import;
        spin_lock(&imp->imp_lock);

        req->rq_import_generation = imp->imp_generation;

        if (ptlrpc_import_delay_req(imp, req, &rc)) {
                spin_lock(&req->rq_lock);
                req->rq_waiting = 1;
                spin_unlock(&req->rq_lock);

                DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
                          "(%s != %s)",
                          lustre_msg_get_status(req->rq_reqmsg),
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                LASSERT(list_empty(&req->rq_list));

                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock(&imp->imp_lock);
                RETURN(0);
        }

        if (rc != 0) {
                spin_unlock(&imp->imp_lock);
                req->rq_status = rc;
                req->rq_phase = RQ_PHASE_INTERPRET;
                RETURN(rc);
        }

        /* XXX this is the same as ptlrpc_queue_wait */
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock(&imp->imp_lock);

        lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());

        rc = sptlrpc_req_refresh_ctx(req, -1);
        if (rc) {
                if (req->rq_err) {
                        req->rq_status = rc;
                        RETURN(1);
                } else {
                        req->rq_wait_ctx = 1;
                        RETURN(0);
                }
        }

        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
               " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
               imp->imp_obd->obd_uuid.uuid,
               lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               lustre_msg_get_opc(req->rq_reqmsg));

        rc = ptl_send_rpc(req, 0);
        if (rc) {
                DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
                req->rq_net_err = 1;
                RETURN(rc);
        }
        RETURN(0);
}

/* this sends any unsent RPCs in @set and returns TRUE if all are sent */
int ptlrpc_check_set(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        int force_timer_recalc = 0;
        ENTRY;

        if (set->set_remaining == 0)
                RETURN(1);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int rc = 0;

                if (req->rq_phase == RQ_PHASE_NEW &&
                    ptlrpc_send_new_req(req)) {
                        force_timer_recalc = 1;
                }
                /* delayed send - skip */
                if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
                        continue;

                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }

                if (req->rq_phase == RQ_PHASE_COMPLETE)
                        continue;

                if (req->rq_phase == RQ_PHASE_INTERPRET)
                        GOTO(interpret, req->rq_status);

                if (req->rq_net_err && !req->rq_timedout)
                        ptlrpc_expire_one_request(req);

                if (req->rq_err) {
                        ptlrpc_unregister_reply(req);
                        req->rq_replied = 0;
                        if (req->rq_status == 0)
                                req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock(&imp->imp_lock);
                        list_del_init(&req->rq_list);
                        spin_unlock(&imp->imp_lock);

                        GOTO(interpret, req->rq_status);
                }

                /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
                 * will only be set after rq_timedout, but the oig waiting
                 * path sets rq_intr irrespective of whether ptlrpcd has
                 * seen a timeout.  Our policy is to only interpret
                 * interrupted rpcs after they have timed out. */
                if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
                                     req->rq_wait_ctx)) {
                        /* NB could be on delayed list */
                        ptlrpc_unregister_reply(req);
                        req->rq_status = -EINTR;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock(&imp->imp_lock);
                        list_del_init(&req->rq_list);
                        spin_unlock(&imp->imp_lock);

                        GOTO(interpret, req->rq_status);
                }

                if (req->rq_phase == RQ_PHASE_RPC) {
                        if (req->rq_timedout || req->rq_resend ||
                            req->rq_waiting || req->rq_wait_ctx) {
                                int status;

                                ptlrpc_unregister_reply(req);

                                spin_lock(&imp->imp_lock);

                                if (ptlrpc_import_delay_req(imp, req,
                                                            &status)) {
                                        spin_unlock(&imp->imp_lock);
                                        continue;
                                }

                                list_del_init(&req->rq_list);
                                if (status != 0) {
                                        req->rq_status = status;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        spin_unlock(&imp->imp_lock);
                                        GOTO(interpret, req->rq_status);
                                }
                                if (req->rq_no_resend && !req->rq_wait_ctx) {
                                        req->rq_status = -ENOTCONN;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        spin_unlock(&imp->imp_lock);
                                        GOTO(interpret, req->rq_status);
                                }
                                list_add_tail(&req->rq_list,
                                              &imp->imp_sending_list);

                                spin_unlock(&imp->imp_lock);

                                req->rq_waiting = 0;
                                if (req->rq_resend) {
                                        lustre_msg_add_flags(req->rq_reqmsg,
                                                             MSG_RESENT);
                                        if (req->rq_bulk) {
                                                __u64 old_xid = req->rq_xid;

                                                ptlrpc_unregister_bulk(req);

                                                /* ensure previous bulk fails */
                                                req->rq_xid = ptlrpc_next_xid();
                                                CDEBUG(D_HA, "resend bulk "
                                                       "old x"LPU64
                                                       " new x"LPU64"\n",
                                                       old_xid, req->rq_xid);
                                        }
                                }
                                /*
                                 * rq_wait_ctx is only touched by ptlrpcd,
                                 * so no lock is needed here.
                                 */
                                status = sptlrpc_req_refresh_ctx(req, -1);
                                if (status) {
                                        if (req->rq_err) {
                                                req->rq_status = status;
                                                force_timer_recalc = 1;
                                        } else {
                                                req->rq_wait_ctx = 1;
                                        }

                                        continue;
                                } else {
                                        req->rq_wait_ctx = 0;
                                }

                                rc = ptl_send_rpc(req, 0);
                                if (rc) {
                                        DEBUG_REQ(D_HA, req, "send failed (%d)",
                                                  rc);
                                        force_timer_recalc = 1;
                                        req->rq_net_err = 1;
                                }
                                /* need to reset the timeout */
                                force_timer_recalc = 1;
                        }

                        spin_lock(&req->rq_lock);

                        if (req->rq_early) {
                                ptlrpc_at_recv_early_reply(req);
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        /* Still waiting for a reply? */
                        if (req->rq_receiving_reply) {
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        /* Did we actually receive a reply? */
                        if (!req->rq_replied) {
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        spin_unlock(&req->rq_lock);

                        spin_lock(&imp->imp_lock);
                        list_del_init(&req->rq_list);
                        spin_unlock(&imp->imp_lock);

                        req->rq_status = after_reply(req);
                        if (req->rq_resend) {
                                /* Add this req to the delayed list so
                                   it can be errored if the import is
                                   evicted after recovery. */
                                spin_lock(&imp->imp_lock);
                                list_add_tail(&req->rq_list,
                                              &imp->imp_delayed_list);
                                spin_unlock(&imp->imp_lock);
                                continue;
                        }

                        /* If there is no bulk associated with this request,
                         * then we're done and should let the interpreter
                         * process the reply.  Similarly if the RPC returned
                         * an error, and therefore the bulk will never arrive.
                         */
                        if (req->rq_bulk == NULL || req->rq_status != 0) {
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                GOTO(interpret, req->rq_status);
                        }

                        req->rq_phase = RQ_PHASE_BULK;
                }

                LASSERT(req->rq_phase == RQ_PHASE_BULK);
                if (ptlrpc_bulk_active(req->rq_bulk))
                        continue;

                if (!req->rq_bulk->bd_success) {
                        /* The RPC reply arrived OK, but the bulk screwed
                         * up!  Dead weird, since the server told us the RPC
                         * was good after getting the REPLY for its GET or
                         * the ACK for its PUT. */
                        DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
                        LBUG();
                }

                req->rq_phase = RQ_PHASE_INTERPRET;

        interpret:
                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
                LASSERT(!req->rq_receiving_reply);

                ptlrpc_unregister_reply(req);
                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk(req);

                if (req->rq_interpret_reply != NULL) {
                        int (*interpreter)(struct ptlrpc_request *,
                                           void *, int) =
                                req->rq_interpret_reply;
                        req->rq_status = interpreter(req, &req->rq_async_args,
                                                     req->rq_status);
                }
                req->rq_phase = RQ_PHASE_COMPLETE;

                CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:"
                       "opc %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
                       imp->imp_obd->obd_uuid.uuid,
                       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
                       lustre_msg_get_opc(req->rq_reqmsg));

                atomic_dec(&imp->imp_inflight);
                set->set_remaining--;
                cfs_waitq_signal(&imp->imp_recovery_waitq);
        }

        /* If we hit an error, we want to recover promptly. */
        RETURN(set->set_remaining == 0 || force_timer_recalc);
}
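
/*
 * Usage sketch (editorial illustration): a minimal drain loop in the
 * style of ptlrpc_set_wait(), polling ptlrpc_check_set() with a 1s
 * timeout so expired requests are noticed promptly.  LWI_TIMEOUT and
 * l_wait_event are the real lustre_lib.h primitives; the wrapper and
 * its (absent) error handling are hypothetical.
 */
static void example_drain_set(struct ptlrpc_request_set *set)
{
        struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
                                             ptlrpc_expired_set, set);
        int rc;

        while (set->set_remaining > 0)
                rc = l_wait_event(set->set_waitq,
                                  ptlrpc_check_set(set), &lwi);
}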

/* Return 1 if we should give up, else 0 */
int ptlrpc_expire_one_request(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        int rc = 0;
        ENTRY;

        DEBUG_REQ(D_ERROR|D_NETERROR, req,
                  "%s (sent at "CFS_TIME_T", "CFS_DURATION_T"s ago)",
                  req->rq_net_err ? "network error" : "timeout",
                  req->rq_sent, cfs_time_sub(cfs_time_current_sec(),
                  req->rq_sent));

        if (imp) {
                LCONSOLE_WARN("Request x"LPU64" sent from %s to NID %s "
                              CFS_DURATION_T"s ago has timed out "
                              "(limit "CFS_DURATION_T"s).\n", req->rq_xid,
                              req->rq_import->imp_obd->obd_name,
                              libcfs_nid2str(imp->imp_connection->c_peer.nid),
                              cfs_time_sub(cfs_time_current_sec(), req->rq_sent),
                              cfs_time_sub(req->rq_deadline, req->rq_sent));
        }

        if (imp != NULL && obd_debug_peer_on_timeout)
                LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);

        spin_lock(&req->rq_lock);
        req->rq_timedout = 1;
        spin_unlock(&req->rq_lock);

        ptlrpc_unregister_reply(req);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();

        if (req->rq_bulk != NULL)
                ptlrpc_unregister_bulk(req);

        if (imp == NULL) {
                DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
                RETURN(1);
        }

        /* The DLM server doesn't want recovery run on its imports. */
        if (imp->imp_dlm_fake)
                RETURN(1);

        /* If this request is for recovery or other primordial tasks,
         * then error it out here. */
        if (req->rq_ctx_init || req->rq_ctx_fini ||
            req->rq_send_state != LUSTRE_IMP_FULL ||
            imp->imp_obd->obd_no_recov) {
                DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                spin_lock(&req->rq_lock);
                req->rq_status = -ETIMEDOUT;
                req->rq_err = 1;
                spin_unlock(&req->rq_lock);
                RETURN(1);
        }

        /* if a request can't be resent we can't wait for an answer after
           the timeout */
        if (req->rq_no_resend) {
                DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
                rc = 1;
        }

        ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));

        RETURN(rc);
}
1451
1452 int ptlrpc_expired_set(void *data)
1453 {
1454         struct ptlrpc_request_set *set = data;
1455         struct list_head          *tmp;
1456         time_t                     now = cfs_time_current_sec();
1457         ENTRY;
1458
1459         LASSERT(set != NULL);
1460
1461         /* A timeout expired; see which reqs it applies to... */
1462         list_for_each (tmp, &set->set_requests) {
1463                 struct ptlrpc_request *req =
1464                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1465
1466                 /* request in-flight? */
1467                 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting &&
1468                        !req->rq_resend) ||
1469                       (req->rq_phase == RQ_PHASE_BULK)))
1470                         continue;
1471
1472                 if (req->rq_timedout ||           /* already dealt with */
1473                     req->rq_deadline > now)       /* not expired */
1474                         continue;
1475
1476                 /* deal with this guy */
1477                 ptlrpc_expire_one_request (req);
1478         }
1479
1480         /* When waiting for a whole set, we always want to break out of
1481          * the sleep so we can recalculate the timeout, or enable interrupts
1482          * iff everyone's timed out.
1483          */
1484         RETURN(1);
1485 }
1486
1487 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
1488 {
1489         spin_lock(&req->rq_lock);
1490         req->rq_intr = 1;
1491         spin_unlock(&req->rq_lock);
1492 }
1493
1494 void ptlrpc_interrupted_set(void *data)
1495 {
1496         struct ptlrpc_request_set *set = data;
1497         struct list_head *tmp;
1498
1499         LASSERT(set != NULL);
1500         CERROR("INTERRUPTED SET %p\n", set);
1501
1502         list_for_each(tmp, &set->set_requests) {
1503                 struct ptlrpc_request *req =
1504                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1505
1506                 if (req->rq_phase != RQ_PHASE_RPC)
1507                         continue;
1508
1509                 ptlrpc_mark_interrupted(req);
1510         }
1511 }
1512
1513 /* get the smallest timeout in the set; this does NOT set a timeout. */
1514 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
1515 {
1516         struct list_head      *tmp;
1517         time_t                 now = cfs_time_current_sec();
1518         int                    timeout = 0;
1519         struct ptlrpc_request *req;
1520         int                    deadline;
1521         ENTRY;
1522
1523         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
1524
1525         list_for_each(tmp, &set->set_requests) {
1526                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1527
1528                 /* request in-flight? */
1529                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
1530                       (req->rq_phase == RQ_PHASE_BULK) ||
1531                       (req->rq_phase == RQ_PHASE_NEW)))
1532                         continue;
1533
1534                 if (req->rq_timedout)   /* already timed out */
1535                         continue;
1536
1537                 if (req->rq_wait_ctx)   /* waiting for ctx */
1538                         continue;
1539
1540                 if (req->rq_phase == RQ_PHASE_NEW)
1541                         deadline = req->rq_sent;
1542                 else
1543                         deadline = req->rq_sent + req->rq_timeout;
1544
1545                 if (deadline <= now)    /* actually expired already */
1546                         timeout = 1;    /* ASAP */
1547                 else if (timeout == 0 || timeout > deadline - now)
1548                         timeout = deadline - now;
1549         }
1550         RETURN(timeout);
1551 }
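
/* Note on the return convention of ptlrpc_set_next_timeout(): 0 means
 * nothing is in flight (the caller in ptlrpc_set_wait() substitutes a
 * one-second poll), 1 means a deadline has already passed and should be
 * handled ASAP, and any larger value is the number of seconds until the
 * earliest deadline in the set. */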
1552
1553 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
1554 {
1555         struct list_head      *tmp;
1556         struct ptlrpc_request *req;
1557         struct l_wait_info     lwi;
1558         int                    rc, timeout;
1559         ENTRY;
1560
1561         if (list_empty(&set->set_requests))
1562                 RETURN(0);
1563
1564         list_for_each(tmp, &set->set_requests) {
1565                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1566                 if (req->rq_phase == RQ_PHASE_NEW)
1567                         (void)ptlrpc_send_new_req(req);
1568         }
1569
1570         do {
1571                 timeout = ptlrpc_set_next_timeout(set);
1572
1573                 /* wait until all complete, interrupted, or an in-flight
1574                  * req times out */
1575                 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
1576                        set, timeout);
1577                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout ? timeout : 1),
1578                                        ptlrpc_expired_set,
1579                                        ptlrpc_interrupted_set, set);
1580                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
1581
1582                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
1583
1584                 /* -EINTR => all requests have been flagged rq_intr so next
1585                  * check completes.
1586                  * -ETIMEDOUT => someone timed out.  When all reqs have
1587                  * timed out, signals are enabled allowing completion with
1588                  * EINTR.
1589                  * I don't really care if we go once more round the loop in
1590                  * the error cases -eeb. */
1591         } while (rc != 0 || set->set_remaining != 0);
1592
1593         LASSERT(set->set_remaining == 0);
1594
1595         rc = 0;
1596         list_for_each(tmp, &set->set_requests) {
1597                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1598
1599                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
1600                 if (req->rq_status != 0)
1601                         rc = req->rq_status;
1602         }
1603
1604         if (set->set_interpret != NULL) {
1605                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
1606                         set->set_interpret;
1607                 rc = interpreter (set, set->set_arg, rc);
1608         } else {
1609                 struct ptlrpc_set_cbdata *cbdata, *n;
1610                 int err;
1611
1612                 list_for_each_entry_safe(cbdata, n,
1613                                          &set->set_cblist, psc_item) {
1614                         list_del_init(&cbdata->psc_item);
1615                         err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
1616                         if (err && !rc)
1617                                 rc = err;
1618                         OBD_FREE_PTR(cbdata);
1619                 }
1620         }
1621
1622         RETURN(rc);
1623 }
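
/* Minimal caller sketch for the request-set API, assuming the usual set
 * helpers (ptlrpc_prep_set(), ptlrpc_set_add_req(), ptlrpc_set_destroy())
 * exported by this module; error handling elided:
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *      if (set == NULL)
 *              return -ENOMEM;
 *      ptlrpc_set_add_req(set, req);   (req built by the caller)
 *      rc = ptlrpc_set_wait(set);      (send all, wait, interpret)
 *      ptlrpc_set_destroy(set);        (drop the set's request refs)
 */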
1624
1625 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
1626 {
1627         ENTRY;
1628         if (request == NULL) {
1629                 EXIT;
1630                 return;
1631         }
1632
1633         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
1634         LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
1635         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
1636         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
1637         LASSERTF(!request->rq_replay, "req %p\n", request);
1638         LASSERT(request->rq_cli_ctx);
1639
1640         req_capsule_fini(&request->rq_pill);
1641
1642         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
1643          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
1644         if (request->rq_import != NULL) {
1645                 if (!locked)
1646                         spin_lock(&request->rq_import->imp_lock);
1647                 list_del_init(&request->rq_mod_list);
1648                 list_del_init(&request->rq_replay_list);
1649                 if (!locked)
1650                         spin_unlock(&request->rq_import->imp_lock);
1651         }
1652         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
1653
1654         if (atomic_read(&request->rq_refcount) != 0) {
1655                 DEBUG_REQ(D_ERROR, request,
1656                           "freeing request with nonzero refcount");
1657                 LBUG();
1658         }
1659
1660         if (request->rq_repbuf != NULL)
1661                 sptlrpc_cli_free_repbuf(request);
1662         if (request->rq_export != NULL) {
1663                 class_export_put(request->rq_export);
1664                 request->rq_export = NULL;
1665         }
1666         if (request->rq_import != NULL) {
1667                 class_import_put(request->rq_import);
1668                 request->rq_import = NULL;
1669         }
1670         if (request->rq_bulk != NULL)
1671                 ptlrpc_free_bulk(request->rq_bulk);
1672
1673         if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
1674                 sptlrpc_cli_free_reqbuf(request);
1675
1676         sptlrpc_req_put_ctx(request, !locked);
1677
1678         if (request->rq_pool)
1679                 __ptlrpc_free_req_to_pool(request);
1680         else
1681                 OBD_FREE(request, sizeof(*request));
1682         EXIT;
1683 }
1684
1685 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
1686 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
1687 {
1688         LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
1689         (void)__ptlrpc_req_finished(request, 1);
1690 }
1691
1692 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
1693 {
1694         ENTRY;
1695         if (request == NULL)
1696                 RETURN(1);
1697
1698         if (request == LP_POISON ||
1699             request->rq_reqmsg == LP_POISON) {
1700                 CERROR("dereferencing freed request (bug 575)\n");
1701                 LBUG();
1702                 RETURN(1);
1703         }
1704
1705         DEBUG_REQ(D_INFO, request, "refcount now %u",
1706                   atomic_read(&request->rq_refcount) - 1);
1707
1708         if (atomic_dec_and_test(&request->rq_refcount)) {
1709                 __ptlrpc_free_req(request, locked);
1710                 RETURN(1);
1711         }
1712
1713         RETURN(0);
1714 }
1715
1716 void ptlrpc_req_finished(struct ptlrpc_request *request)
1717 {
1718         __ptlrpc_req_finished(request, 0);
1719 }
1720
1721 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
1722 {
1723         return request->rq_xid;
1724 }
1725 EXPORT_SYMBOL(ptlrpc_req_xid);
1726
1727 /* Disengage the client's reply buffer from the network
1728  * NB does _NOT_ unregister any client-side bulk.
1729  * IDEMPOTENT, but _not_ safe against concurrent callers.
1730  * The request owner (i.e. the thread doing the I/O) must call...
1731  */
1732 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
1733 {
1734         int                rc;
1735         cfs_waitq_t       *wq;
1736         struct l_wait_info lwi;
1737
1738         LASSERT(!in_interrupt ());             /* might sleep */
1739         if (!ptlrpc_client_recv_or_unlink(request))
1740                 /* Nothing left to do */
1741                 return;
1742
1743         LNetMDUnlink (request->rq_reply_md_h);
1744
1745         /* We have to l_wait_event() whatever the result, to give liblustre
1746          * a chance to run reply_in_callback(), and to make sure we've
1747          * unlinked before returning a req to the pool */
1748
1749         if (request->rq_set != NULL)
1750                 wq = &request->rq_set->set_waitq;
1751         else
1752                 wq = &request->rq_reply_waitq;
1753
1754         for (;;) {
1755                 /* Network access will complete in finite time but the HUGE
1756                  * timeout lets us CWARN for visibility of sluggish NALs */
1757                 lwi = LWI_TIMEOUT(cfs_time_seconds(LONG_UNLINK), NULL, NULL);
1758                 rc = l_wait_event (*wq, !ptlrpc_client_recv_or_unlink(request),
1759                                    &lwi);
1760                 if (rc == 0)
1761                         return;
1762
1763                 LASSERT (rc == -ETIMEDOUT);
1764                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
1765                           "rvcng=%d unlnk=%d", request->rq_receiving_reply,
1766                           request->rq_must_unlink);
1767         }
1768 }
1769
1770 /* caller must hold imp->imp_lock */
1771 void ptlrpc_free_committed(struct obd_import *imp)
1772 {
1773         struct list_head *tmp, *saved;
1774         struct ptlrpc_request *req;
1775         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1776         ENTRY;
1777
1778         LASSERT(imp != NULL);
1779
1780         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1781
1782
1783         if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
1784             imp->imp_generation == imp->imp_last_generation_checked) {
1785                 CDEBUG(D_RPCTRACE, "%s: skip recheck: last_committed "LPU64"\n",
1786                        imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1787                 EXIT;
1788                 return;
1789         }
1790
1791         CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
1792                imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
1793                imp->imp_generation);
1794         imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
1795         imp->imp_last_generation_checked = imp->imp_generation;
1796
1797         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1798                 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1799
1800                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1801                 LASSERT(req != last_req);
1802                 last_req = req;
1803
1804                 if (req->rq_import_generation < imp->imp_generation) {
1805                         DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
1806                         GOTO(free_req, 0);
1807                 }
1808
1809                 if (req->rq_replay) {
1810                         DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
1811                         continue;
1812                 }
1813
1814                 /* not yet committed */
1815                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1816                         DEBUG_REQ(D_RPCTRACE, req, "stopping search");
1817                         break;
1818                 }
1819
1820                 DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
1821                           imp->imp_peer_committed_transno);
1822 free_req:
1823                 spin_lock(&req->rq_lock);
1824                 req->rq_replay = 0;
1825                 spin_unlock(&req->rq_lock);
1826                 if (req->rq_commit_cb != NULL)
1827                         req->rq_commit_cb(req);
1828                 list_del_init(&req->rq_replay_list);
1829                 __ptlrpc_req_finished(req, 1);
1830         }
1831
1832         EXIT;
1833         return;
1834 }
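
/* Sketch of the locking contract for ptlrpc_free_committed() (a
 * hypothetical caller; the import lock must already be held):
 *
 *      spin_lock(&imp->imp_lock);
 *      ptlrpc_free_committed(imp);
 *      spin_unlock(&imp->imp_lock);
 */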
1835
1836 void ptlrpc_cleanup_client(struct obd_import *imp)
1837 {
1838         ENTRY;
1839         EXIT;
1840         return;
1841 }
1842
1843 void ptlrpc_resend_req(struct ptlrpc_request *req)
1844 {
1845         DEBUG_REQ(D_HA, req, "going to resend");
1846         lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
1847         req->rq_status = -EAGAIN;
1848
1849         spin_lock(&req->rq_lock);
1850         req->rq_resend = 1;
1851         req->rq_net_err = 0;
1852         req->rq_timedout = 0;
1853         if (req->rq_bulk) {
1854                 __u64 old_xid = req->rq_xid;
1855
1856                 /* ensure previous bulk fails */
1857                 req->rq_xid = ptlrpc_next_xid();
1858                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
1859                        old_xid, req->rq_xid);
1860         }
1861         ptlrpc_wake_client_req(req);
1862         spin_unlock(&req->rq_lock);
1863 }
1864
1865 /* XXX: this function and rq_status are currently unused */
1866 void ptlrpc_restart_req(struct ptlrpc_request *req)
1867 {
1868         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1869         req->rq_status = -ERESTARTSYS;
1870
1871         spin_lock(&req->rq_lock);
1872         req->rq_restart = 1;
1873         req->rq_timedout = 0;
1874         ptlrpc_wake_client_req(req);
1875         spin_unlock(&req->rq_lock);
1876 }
1877
1878 static int expired_request(void *data)
1879 {
1880         struct ptlrpc_request *req = data;
1881         ENTRY;
1882
1883         /* some failure can suspend regular timeouts */
1884         if (ptlrpc_check_suspend())
1885                 RETURN(1);
1886
1887         /* deadline may have changed with an early reply */
1888         if (req->rq_deadline > cfs_time_current_sec())
1889                 RETURN(1);
1890
1891         RETURN(ptlrpc_expire_one_request(req));
1892 }
1893
1894 static void interrupted_request(void *data)
1895 {
1896         struct ptlrpc_request *req = data;
1897         DEBUG_REQ(D_HA, req, "request interrupted");
1898         spin_lock(&req->rq_lock);
1899         req->rq_intr = 1;
1900         spin_unlock(&req->rq_lock);
1901 }
1902
1903 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1904 {
1905         ENTRY;
1906         atomic_inc(&req->rq_refcount);
1907         RETURN(req);
1908 }
1909
1910 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1911                                       struct obd_import *imp)
1912 {
1913         struct list_head *tmp;
1914
1915         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1916
1917         /* clear this for new requests that were resent as well
1918            as resent replayed requests. */
1919         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1920
1921         /* don't re-add requests that have been replayed */
1922         if (!list_empty(&req->rq_replay_list))
1923                 return;
1924
1925         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
1926
1927         LASSERT(imp->imp_replayable);
1928         /* Balanced in ptlrpc_free_committed, usually. */
1929         ptlrpc_request_addref(req);
1930         list_for_each_prev(tmp, &imp->imp_replay_list) {
1931                 struct ptlrpc_request *iter =
1932                         list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1933
1934                 /* We may have duplicate transnos if we create and then
1935                  * open a file, or for closes retained to match creating
1936                  * opens, so use req->rq_xid as a secondary key.
1937                  * (See bugs 684, 685, and 428.)
1938                  * XXX no longer needed, but all opens need transnos!
1939                  */
1940                 if (iter->rq_transno > req->rq_transno)
1941                         continue;
1942
1943                 if (iter->rq_transno == req->rq_transno) {
1944                         LASSERT(iter->rq_xid != req->rq_xid);
1945                         if (iter->rq_xid > req->rq_xid)
1946                                 continue;
1947                 }
1948
1949                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
1950                 return;
1951         }
1952
1953         list_add_tail(&req->rq_replay_list, &imp->imp_replay_list);
1954 }
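
/* Worked example of the insertion order above (illustrative values
 * only): with a replay list holding (transno, xid) pairs
 *
 *      (5, 10) (7, 12) (7, 14) (9, 20)
 *
 * a request with transno 7 and xid 13 lands between (7, 12) and
 * (7, 14): the backwards scan skips entries with a larger transno, or
 * an equal transno and a larger xid, and list_add() then places the new
 * request immediately after the first entry it does not skip. */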
1955
1956 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1957 {
1958         int rc = 0;
1959         int brc;
1960         struct l_wait_info lwi;
1961         struct obd_import *imp = req->rq_import;
1962         cfs_duration_t timeout = CFS_TICK;
1963         long timeoutl;
1964         ENTRY;
1965
1966         LASSERT(req->rq_set == NULL);
1967         LASSERT(!req->rq_receiving_reply);
1968         atomic_inc(&imp->imp_inflight);
1969
1970         /* for distributed debugging */
1971         lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
1972         LASSERT(imp->imp_obd != NULL);
1973         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc "
1974                "%s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
1975                imp->imp_obd->obd_uuid.uuid,
1976                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1977                libcfs_nid2str(imp->imp_connection->c_peer.nid),
1978                lustre_msg_get_opc(req->rq_reqmsg));
1979
1980         /* Mark phase here for a little debug help */
1981         req->rq_phase = RQ_PHASE_RPC;
1982
1983         spin_lock(&imp->imp_lock);
1984         req->rq_import_generation = imp->imp_generation;
1985 restart:
1986         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1987                 list_del(&req->rq_list);
1988
1989                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1990                 spin_unlock(&imp->imp_lock);
1991
1992                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
1993                           cfs_curproc_comm(),
1994                           ptlrpc_import_state_name(req->rq_send_state),
1995                           ptlrpc_import_state_name(imp->imp_state));
1996                 lwi = LWI_INTR(interrupted_request, req);
1997                 rc = l_wait_event(req->rq_reply_waitq,
1998                                   (req->rq_send_state == imp->imp_state ||
1999                                    req->rq_err || req->rq_intr),
2000                                   &lwi);
2001                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d/%d == 1)",
2002                           cfs_curproc_comm(),
2003                           ptlrpc_import_state_name(imp->imp_state),
2004                           ptlrpc_import_state_name(req->rq_send_state),
2005                           req->rq_err, req->rq_intr);
2006
2007                 spin_lock(&imp->imp_lock);
2008                 list_del_init(&req->rq_list);
2009
2010                 if (req->rq_err) {
2011                         /* rq_status was set locally */
2012                         rc = -EIO;
2013                 }
2014                 else if (req->rq_intr) {
2015                         rc = -EINTR;
2016                 }
2017                 else if (req->rq_no_resend) {
2018                         spin_unlock(&imp->imp_lock);
2019                         GOTO(out, rc = -ETIMEDOUT);
2020                 }
2021                 else {
2022                         GOTO(restart, rc);
2023                 }
2024         }
2025
2026         if (rc != 0) {
2027                 list_del_init(&req->rq_list);
2028                 spin_unlock(&imp->imp_lock);
2029                 req->rq_status = rc;    /* XXX this ok? */
2030                 GOTO(out, rc);
2031         }
2032
2033         if (req->rq_resend) {
2034                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
2035
2036                 if (req->rq_bulk != NULL) {
2037                         ptlrpc_unregister_bulk (req);
2038
2039                         /* bulk requests are supposed to be
2040                          * idempotent, so we are free to bump the xid
2041                          * here, which we need to do before
2042                          * registering the bulk again (bug 6371).
2043                          * print the old xid first for sanity.
2044                          */
2045                         DEBUG_REQ(D_HA, req, "bumping xid for bulk: ");
2046                         req->rq_xid = ptlrpc_next_xid();
2047                 }
2048
2049                 DEBUG_REQ(D_HA, req, "resending: ");
2050         }
2051
2052         /* XXX this is the same as ptlrpc_set_wait */
2053         LASSERT(list_empty(&req->rq_list));
2054         list_add_tail(&req->rq_list, &imp->imp_sending_list);
2055         spin_unlock(&imp->imp_lock);
2056
2057         rc = sptlrpc_req_refresh_ctx(req, 0);
2058         if (rc) {
2059                 if (req->rq_err) {
2060                         /* we got fatal ctx refresh error, directly jump out
2061                          * thus we can pass back the actual error code.
2062                          */
2063                         spin_lock(&imp->imp_lock);
2064                         list_del_init(&req->rq_list);
2065                         spin_unlock(&imp->imp_lock);
2066
2067                         CERROR("Failed to refresh ctx of req %p: %d\n", req, rc);
2068                         GOTO(out, rc);
2069                 }
2070                 /* simulate an error having occurred during the RPC send */
2071                 goto after_send;
2072         }
2073
2074         rc = ptl_send_rpc(req, 0);
2075         if (rc)
2076                 DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
2077
2078 repeat:
2079         timeoutl = req->rq_deadline - cfs_time_current_sec();
2080         timeout = (timeoutl <= 0 || rc) ? CFS_TICK :
2081                 cfs_time_seconds(timeoutl);
2082         DEBUG_REQ(D_NET, req,
2083                   "-- sleeping for "CFS_DURATION_T" ticks", timeout);
2084         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
2085                                req);
2086         rc = l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi);
2087         if (rc == -ETIMEDOUT && ((req->rq_deadline > cfs_time_current_sec()) ||
2088                                  ptlrpc_check_and_wait_suspend(req)))
2089                 goto repeat;
2090
2091 after_send:
2092         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:opc "
2093                "%s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
2094                imp->imp_obd->obd_uuid.uuid,
2095                lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
2096                libcfs_nid2str(imp->imp_connection->c_peer.nid),
2097                lustre_msg_get_opc(req->rq_reqmsg));
2098
2099         spin_lock(&imp->imp_lock);
2100         list_del_init(&req->rq_list);
2101         spin_unlock(&imp->imp_lock);
2102
2103         /* If the reply was received normally, this just grabs the spinlock
2104          * (ensuring the reply callback has returned), sees that
2105          * req->rq_receiving_reply is clear and returns. */
2106         ptlrpc_unregister_reply (req);
2107
2108
2109         if (req->rq_err) {
2110                 DEBUG_REQ(D_RPCTRACE, req, "err rc=%d status=%d",
2111                           rc, req->rq_status);
2112                 GOTO(out, rc = -EIO);
2113         }
2114
2115         if (req->rq_intr) {
2116                 /* Should only be interrupted if we timed out. */
2117                 if (!req->rq_timedout)
2118                         DEBUG_REQ(D_ERROR, req,
2119                                   "rq_intr set but rq_timedout not");
2120                 GOTO(out, rc = -EINTR);
2121         }
2122
2123         /* Resend if we need to */
2124         if (req->rq_resend) {
2125                 /* ...unless we were specifically told otherwise. */
2126                 if (req->rq_no_resend)
2127                         GOTO(out, rc = -ETIMEDOUT);
2128                 spin_lock(&imp->imp_lock);
2129                 goto restart;
2130         }
2131
2132         if (req->rq_timedout) {                 /* non-recoverable timeout */
2133                 GOTO(out, rc = -ETIMEDOUT);
2134         }
2135
2136         if (!req->rq_replied) {
2137                 /* How can this be? -eeb */
2138                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
2139                 LBUG();
2140                 GOTO(out, rc = req->rq_status);
2141         }
2142
2143         rc = after_reply(req);
2144         /* NB may return +ve success rc */
2145         if (req->rq_resend) {
2146                 spin_lock(&imp->imp_lock);
2147                 goto restart;
2148         }
2149
2150  out:
2151         if (req->rq_bulk != NULL) {
2152                 if (rc >= 0) {
2153                         /* success so far.  Note that anything going wrong
2154                          * with bulk now, is EXTREMELY strange, since the
2155                          * server must have believed that the bulk
2156                          * transferred OK before she replied with success to
2157                          * me. */
2158                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
2159                         brc = l_wait_event(req->rq_reply_waitq,
2160                                            !ptlrpc_bulk_active(req->rq_bulk),
2161                                            &lwi);
2162                         LASSERT(brc == 0 || brc == -ETIMEDOUT);
2163                         if (brc != 0) {
2164                                 LASSERT(brc == -ETIMEDOUT);
2165                                 DEBUG_REQ(D_ERROR, req, "bulk timed out");
2166                                 rc = brc;
2167                         } else if (!req->rq_bulk->bd_success) {
2168                                 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
2169                                 rc = -EIO;
2170                         }
2171                 }
2172                 if (rc < 0)
2173                         ptlrpc_unregister_bulk (req);
2174         }
2175
2176         LASSERT(!req->rq_receiving_reply);
2177         req->rq_phase = RQ_PHASE_INTERPRET;
2178
2179         atomic_dec(&imp->imp_inflight);
2180         cfs_waitq_signal(&imp->imp_recovery_waitq);
2181         RETURN(rc);
2182 }
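
/* Minimal synchronous caller sketch, assuming the request was built
 * with one of the usual constructors (e.g. ptlrpc_prep_req(), defined
 * earlier in this file); error handling elided:
 *
 *      req = ptlrpc_prep_req(imp, version, opcode, count, lengths, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req);    (send and block for the reply)
 *      ptlrpc_req_finished(req);       (drop the caller's reference)
 */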
2183
2184 struct ptlrpc_replay_async_args {
2185         int praa_old_state;
2186         int praa_old_status;
2187 };
2188
2189 static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
2190                                     void * data, int rc)
2191 {
2192         struct ptlrpc_replay_async_args *aa = data;
2193         struct obd_import *imp = req->rq_import;
2194
2195         ENTRY;
2196         atomic_dec(&imp->imp_replay_inflight);
2197
2198         if (!req->rq_replied) {
2199                 CERROR("request replay timed out, restarting recovery\n");
2200                 GOTO(out, rc = -ETIMEDOUT);
2201         }
2202
2203         if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
2204             (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
2205              lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
2206                 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
2207
2208         /* The transno had better not change over replay. */
2209         LASSERT(lustre_msg_get_transno(req->rq_reqmsg) ==
2210                 lustre_msg_get_transno(req->rq_repmsg));
2211
2212         DEBUG_REQ(D_HA, req, "got rep");
2213
2214         /* let the callback do fixups, possibly including in the request */
2215         if (req->rq_replay_cb)
2216                 req->rq_replay_cb(req);
2217
2218         if (req->rq_replied &&
2219             lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
2220                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
2221                           lustre_msg_get_status(req->rq_repmsg),
2222                           aa->praa_old_status);
2223         } else {
2224                 /* Put it back for re-replay. */
2225                 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2226         }
2227
2228         /*
2229          * Errors during replay can set transno to 0, but
2230          * imp_last_replay_transno shouldn't be set to 0 anyway
2231          */
2232         if (req->rq_transno > 0) {
2233                 spin_lock(&imp->imp_lock);
2234                 LASSERT(req->rq_transno <= imp->imp_last_replay_transno);
2235                 imp->imp_last_replay_transno = req->rq_transno;
2236                 spin_unlock(&imp->imp_lock);
2237         } else
2238                 CERROR("Transno is 0 during replay!\n");
2239         /* continue with recovery */
2240         rc = ptlrpc_import_recovery_state_machine(imp);
2241  out:
2242         req->rq_send_state = aa->praa_old_state;
2243
2244         if (rc != 0)
2245                 /* this replay failed, so restart recovery */
2246                 ptlrpc_connect_import(imp, NULL);
2247
2248         RETURN(rc);
2249 }
2250
2251 int ptlrpc_replay_req(struct ptlrpc_request *req)
2252 {
2253         struct ptlrpc_replay_async_args *aa;
2254         ENTRY;
2255
2256         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
2257         /* Not handling automatic bulk replay yet (or ever?) */
2258         LASSERT(req->rq_bulk == NULL);
2259
2260         LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
2261         aa = ptlrpc_req_async_args(req);
2262         memset(aa, 0, sizeof *aa);
2263
2264         /* Prepare request to be resent with ptlrpcd */
2265         aa->praa_old_state = req->rq_send_state;
2266         req->rq_send_state = LUSTRE_IMP_REPLAY;
2267         req->rq_phase = RQ_PHASE_NEW;
2268         if (req->rq_repmsg)
2269                 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
2270         req->rq_status = 0;
2271         req->rq_interpret_reply = ptlrpc_replay_interpret;
2272         /* Readjust the timeout for current conditions */
2273         ptlrpc_at_set_req_timeout(req);
2274
2275         DEBUG_REQ(D_HA, req, "REPLAY");
2276
2277         atomic_inc(&req->rq_import->imp_replay_inflight);
2278         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
2279
2280         ptlrpcd_add_req(req);
2281         RETURN(0);
2282 }
2283
2284 void ptlrpc_abort_inflight(struct obd_import *imp)
2285 {
2286         struct list_head *tmp, *n;
2287         ENTRY;
2288
2289         /* Make sure that no new requests get processed for this import.
2290          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
2291          * this flag and then putting requests on sending_list or delayed_list.
2292          */
2293         spin_lock(&imp->imp_lock);
2294
2295         /* XXX locking?  Maybe we should remove each request with the list
2296          * locked?  Also, how do we know if the requests on the list are
2297          * being freed at this time?
2298          */
2299         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
2300                 struct ptlrpc_request *req =
2301                         list_entry(tmp, struct ptlrpc_request, rq_list);
2302
2303                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
2304
2305                 spin_lock (&req->rq_lock);
2306                 if (req->rq_import_generation < imp->imp_generation) {
2307                         req->rq_err = 1;
2308                         req->rq_status = -EINTR;
2309                         ptlrpc_wake_client_req(req);
2310                 }
2311                 spin_unlock (&req->rq_lock);
2312         }
2313
2314         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
2315                 struct ptlrpc_request *req =
2316                         list_entry(tmp, struct ptlrpc_request, rq_list);
2317
2318                 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
2319
2320                 spin_lock (&req->rq_lock);
2321                 if (req->rq_import_generation < imp->imp_generation) {
2322                         req->rq_err = 1;
2323                         req->rq_status = -EINTR;
2324                         ptlrpc_wake_client_req(req);
2325                 }
2326                 spin_unlock (&req->rq_lock);
2327         }
2328
2329         /* Last chance to free reqs left on the replay list, but we
2330          * will still leak reqs that haven't committed.  */
2331         if (imp->imp_replayable)
2332                 ptlrpc_free_committed(imp);
2333
2334         spin_unlock(&imp->imp_lock);
2335
2336         EXIT;
2337 }
2338
2339 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
2340 {
2341         struct list_head *tmp, *n;
2342
2343         LASSERT(set != NULL);
2344
2345         list_for_each_safe(tmp, n, &set->set_requests) {
2346                 struct ptlrpc_request *req =
2347                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
2348
2349                 spin_lock (&req->rq_lock);
2350                 if (req->rq_phase != RQ_PHASE_RPC) {
2351                         spin_unlock (&req->rq_lock);
2352                         continue;
2353                 }
2354
2355                 req->rq_err = 1;
2356                 req->rq_status = -EINTR;
2357                 ptlrpc_wake_client_req(req);
2358                 spin_unlock (&req->rq_lock);
2359         }
2360 }
2361
2362 static __u64 ptlrpc_last_xid;
2363 static spinlock_t ptlrpc_last_xid_lock;
2364
2365 /* Initialize the XID for the node.  This is common among all requests on
2366  * this node, and only requires the property that it is monotonically
2367  * increasing.  It does not need to be sequential.  Since this is also used
2368  * as the RDMA match bits, it is important that a single client NOT have
2369  * the same match bits for two different in-flight requests, hence we do
2370  * NOT want to have an XID per target or similar.
2371  *
2372  * To avoid an unlikely collision between match bits after a client reboot
2373  * (which would cause old bulk data to be delivered into the wrong buffer) we
2374  * initialize the XID based on the current time, assuming at most 1M RPC/s.
2375  * If the time is clearly incorrect, we instead use a 62-bit random number.
2376  * In the worst case the random number will overflow 1M RPCs per second in
2377  * 9133 years, or permutations thereof.
2378  */
2379 #define YEAR_2004 (1ULL << 30)
2380 void ptlrpc_init_xid(void)
2381 {
2382         time_t now = cfs_time_current_sec();
2383
2384         spin_lock_init(&ptlrpc_last_xid_lock);
2385         if (now < YEAR_2004) {
2386                 ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
2387                 ptlrpc_last_xid >>= 2;
2388                 ptlrpc_last_xid |= (1ULL << 61);
2389         } else {
2390                 ptlrpc_last_xid = (now << 20);
2391         }
2392 }
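
/* Arithmetic sanity check for the scheme above: (now << 20) reserves
 * 2^20 (~1.05M) XIDs per second of wall clock, so after a reboot with a
 * sane clock the new starting XID is at or beyond anything the previous
 * incarnation could have consumed at the assumed 1M RPC/s peak. */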
2393
2394 __u64 ptlrpc_next_xid(void)
2395 {
2396         __u64 tmp;
2397         spin_lock(&ptlrpc_last_xid_lock);
2398         tmp = ++ptlrpc_last_xid;
2399         spin_unlock(&ptlrpc_last_xid_lock);
2400         return tmp;
2401 }
2402
2403 __u64 ptlrpc_sample_next_xid(void)
2404 {
2405 #if BITS_PER_LONG == 32
2406         /* need to avoid possible word tearing on 32-bit systems */
2407         __u64 tmp;
2408         spin_lock(&ptlrpc_last_xid_lock);
2409         tmp = ptlrpc_last_xid + 1;
2410         spin_unlock(&ptlrpc_last_xid_lock);
2411         return tmp;
2412 #else
2413         /* No need to lock, since returned value is racy anyways */
2414         return ptlrpc_last_xid + 1;
2415 #endif
2416 }
2417 EXPORT_SYMBOL(ptlrpc_sample_next_xid);