/* lustre/ptlrpc/client.c (branch b1_4_mountconf) */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <errno.h>
#include <signal.h>
#include <liblustre.h>
#endif

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

#include "ptlrpc_internal.h"

void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}

struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        lnet_nid_t                self;
        lnet_process_id_t         peer;
        int                       err;

        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(peer, self, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 struct obd_uuid *uuid)
{
        lnet_nid_t        self;
        lnet_process_id_t peer;
        int               err;

        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return;
        }

        conn->c_peer = peer;
        conn->c_self = self;
        return;
}

static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_md_h = LNET_INVALID_HANDLE;
        desc->bd_portal = portal;
        desc->bd_type = type;

        return desc;
}
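
/*
 * Note on the allocation above (descriptive comment, not from the original
 * file): new_bulk() sizes its single OBD_ALLOC as
 * offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]), i.e. the fixed part of
 * the descriptor plus npages trailing bd_iov[] entries in one chunk.
 * ptlrpc_free_bulk() frees it with the same expression, using bd_max_iov
 * to recover npages.
 */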

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              int npages, int type, int portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
        desc = new_bulk(npages, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = client_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* The req now owns desc, and frees it when the req itself is freed */
        req->rq_bulk = desc;

        return desc;
}
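
/*
 * Typical client-side usage (a sketch of the assumed caller pattern, not
 * code from the original file): prepare the descriptor, then attach each
 * page of the transfer before sending the request:
 *
 *         desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK, portal);
 *         if (desc == NULL)
 *                 return -ENOMEM;
 *         for (i = 0; i < npages; i++)
 *                 ptlrpc_prep_bulk_page(desc, pages[i], 0, PAGE_SIZE);
 *
 * Since req owns desc after ptlrpc_prep_bulk_imp(), the caller does not
 * free the descriptor explicitly.
 */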

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              int npages, int type, int portal)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = new_bulk(npages, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        return desc;
}

void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                           struct page *page, int pageoffset, int len)
{
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_SIZE);

        desc->bd_nob += len;

        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(!desc->bd_network_rw);            /* network has handed off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
                                bd_iov[desc->bd_max_iov]));
        EXIT;
}

void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
        struct list_head *l, *tmp;
        struct ptlrpc_request *req;

        if (!pool)
                return;

        list_for_each_safe(l, tmp, &pool->prp_req_list) {
                req = list_entry(l, struct ptlrpc_request, rq_list);
                list_del(&req->rq_list);
                LASSERT(req->rq_reqmsg);
                OBD_FREE(req->rq_reqmsg, pool->prp_rq_size);
                OBD_FREE(req, sizeof(*req));
        }
        OBD_FREE(pool, sizeof(*pool));
}

void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
        int i;
        int size = 1;

        while (size < pool->prp_rq_size)
                size <<= 1;

        LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
                 "Trying to change pool size with nonempty pool "
                 "from %d to %d bytes\n", pool->prp_rq_size, size);

        spin_lock(&pool->prp_lock);
        pool->prp_rq_size = size;
        for (i = 0; i < num_rq; i++) {
                struct ptlrpc_request *req;
                struct lustre_msg *msg;

                OBD_ALLOC(req, sizeof(struct ptlrpc_request));
                if (!req)
                        goto out;
                OBD_ALLOC_GFP(msg, size, GFP_KERNEL);
                if (!msg) {
                        OBD_FREE(req, sizeof(struct ptlrpc_request));
                        goto out;
                }
                req->rq_reqmsg = msg;
                req->rq_pool = pool;
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
out:
        spin_unlock(&pool->prp_lock);
        return;
}

struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int num_rq, int msgsize,
                                                void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
        struct ptlrpc_request_pool *pool;

        OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
        if (!pool)
                return NULL;

        /* msgsize is rounded up to the next power of two when the pool is
         * populated, because the kernel allocator would round the
         * allocation up that way internally anyway. */

        spin_lock_init(&pool->prp_lock);
        INIT_LIST_HEAD(&pool->prp_req_list);
        pool->prp_rq_size = msgsize;
        pool->prp_populate = populate_pool;

        populate_pool(pool, num_rq);

        if (list_empty(&pool->prp_req_list)) {
                /* have not allocated a single request for the pool */
                OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
                pool = NULL;
        }
        return pool;
}
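
/*
 * Usage sketch (assumed caller pattern; the num_rq and msgsize values are
 * illustrative only): create a pool of pre-allocated requests to draw on
 * when normal allocation could deadlock, e.g. in the writeout path:
 *
 *         struct ptlrpc_request_pool *pool;
 *
 *         pool = ptlrpc_init_rq_pool(4, 16384, ptlrpc_add_rqs_to_pool);
 *         ...
 *         req = ptlrpc_prep_req_pool(imp, opcode, count, lengths, bufs,
 *                                    pool);
 *         ...
 *         ptlrpc_free_rq_pool(pool);
 */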

static struct ptlrpc_request *ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        struct lustre_msg *reqmsg;

        if (!pool)
                return NULL;

        spin_lock(&pool->prp_lock);

        /* See if we have anything in the pool, and bail out if not.
         * In the writeout path, where this matters, this is safe to do:
         * nothing is lost in that case, and this code will be called
         * again once some in-flight requests complete. */
        if (unlikely(list_empty(&pool->prp_req_list))) {
                spin_unlock(&pool->prp_lock);
                return NULL;
        }

        request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
                             rq_list);
        list_del(&request->rq_list);
        spin_unlock(&pool->prp_lock);

        LASSERT(request->rq_reqmsg);
        LASSERT(request->rq_pool);

        reqmsg = request->rq_reqmsg;
        memset(request, 0, sizeof(*request));
        request->rq_reqmsg = reqmsg;
        request->rq_pool = pool;
        request->rq_reqlen = pool->prp_rq_size;
        return request;
}

struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp, int opcode,
                                            int count, int *lengths,
                                            char **bufs,
                                            struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request = NULL;
        int rc;
        ENTRY;

        LASSERT((unsigned long)imp > 0x1000);
        LASSERT(imp != LP_POISON);
        LASSERT((unsigned long)imp->imp_client > 0x1000);
        LASSERT(imp->imp_client != LP_POISON);

        if (pool)
                request = ptlrpc_prep_req_from_pool(pool);

        if (!request)
                OBD_ALLOC(request, sizeof(*request));

        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_request(request, count, lengths, bufs);
        if (rc) {
                LASSERT(!request->rq_pool);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        if (imp->imp_server_timeout)
                request->rq_timeout = obd_timeout / 2;
        else
                request->rq_timeout = obd_timeout;
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = class_import_get(imp);
        request->rq_export = NULL;

        request->rq_req_cbid.cbid_fn  = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;

        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
        request->rq_reply_cbid.cbid_arg = request;

        request->rq_phase = RQ_PHASE_NEW;

        /* XXX FIXME bug 249 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        spin_lock_init(&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        INIT_LIST_HEAD(&request->rq_replay_list);
        INIT_LIST_HEAD(&request->rq_set_chain);
        init_waitqueue_head(&request->rq_reply_waitq);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        request->rq_reqmsg->opc = opcode;
        request->rq_reqmsg->flags = 0;

        RETURN(request);
}

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        return ptlrpc_prep_req_pool(imp, opcode, count, lengths, bufs, NULL);
}
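
/*
 * Usage sketch (assumed caller pattern; bodylen, namelen and name are
 * hypothetical): allocate a request with two message buffers and send it
 * synchronously:
 *
 *         int lengths[2] = { bodylen, namelen };
 *         char *bufs[2]  = { NULL, name };
 *
 *         req = ptlrpc_prep_req(imp, opcode, 2, lengths, bufs);
 *         if (req == NULL)
 *                 return -ENOMEM;
 *         rc = ptlrpc_queue_wait(req);
 *         ptlrpc_req_finished(req);
 */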

struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;

        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        set->set_remaining = 0;
        spin_lock_init(&set->set_new_req_lock);
        INIT_LIST_HEAD(&set->set_new_requests);

        RETURN(set);
}
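
/*
 * Typical set lifecycle (a sketch of the assumed caller pattern, not code
 * from the original file): requests added to a set are sent concurrently
 * and the caller waits for all of them:
 *
 *         struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *         if (set == NULL)
 *                 return -ENOMEM;
 *         ptlrpc_set_add_req(set, req1);  (the set takes the req refs)
 *         ptlrpc_set_add_req(set, req2);
 *         rc = ptlrpc_set_wait(set);      (non-zero if any req failed)
 *         ptlrpc_set_destroy(set);        (drops the req refs)
 */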

/* Finish with this set; opposite of prep_set. */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct list_head *next;
        int               expected_phase;
        int               n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (set->set_remaining == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERT(set->set_remaining == 0 || set->set_remaining == n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {
                        if (req->rq_interpret_reply != NULL) {
                                int (*interpreter)(struct ptlrpc_request *,
                                                   void *, int) =
                                        req->rq_interpret_reply;

                                /* higher level (i.e. LOV) failed;
                                 * let the sub reqs clean up */
                                req->rq_status = -EBADR;
                                interpreter(req, &req->rq_async_args,
                                            req->rq_status);
                        }
                        set->set_remaining--;
                }

                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }

        LASSERT(set->set_remaining == 0);

        OBD_FREE(set, sizeof(*set));
        EXIT;
}

void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        set->set_remaining++;
        atomic_inc(&req->rq_import->imp_inflight);
}

/* Locked so that many callers can add things; the context that owns the
 * set is supposed to notice these and move them into the set proper. */
void ptlrpc_set_add_new_req(struct ptlrpc_request_set *set,
                            struct ptlrpc_request *req)
{
        unsigned long flags;

        spin_lock_irqsave(&set->set_new_req_lock, flags);
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_new_requests);
        req->rq_set = set;
        spin_unlock_irqrestore(&set->set_new_req_lock, flags);
}

/*
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed.  If false and *status
 * is set, then the request cannot be sent and *status is the error code.
 * If false and *status is 0, then the request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
                                   struct ptlrpc_request *req, int *status)
{
        int delay = 0;
        ENTRY;

        LASSERT(status != NULL);
        *status = 0;

        if (imp->imp_state == LUSTRE_IMP_NEW) {
                DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
                *status = -EIO;
                LBUG();
        } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
                DEBUG_REQ(D_ERROR, req, "IMP_CLOSED");
                *status = -EIO;
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                   imp->imp_state == LUSTRE_IMP_CONNECTING) {
                /* allow CONNECT even if import is invalid */ ;
        } else if (imp->imp_invalid) {
                /* If the import has been invalidated (such as by an OST
                 * failure), the request must fail with -EIO. */
                if (!imp->imp_deactive)
                        DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
                *status = -EIO;
        } else if (req->rq_import_generation != imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req wrong generation:");
                *status = -EIO;
        } else if (req->rq_send_state != imp->imp_state) {
                if (imp->imp_obd->obd_no_recov || imp->imp_dlm_fake ||
                    req->rq_no_delay)
                        *status = -EWOULDBLOCK;
                else
                        delay = 1;
        }

        RETURN(delay);
}

static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        unsigned long flags;
        int rc = 0;
        ENTRY;

        /* serialise with network callback */
        spin_lock_irqsave(&req->rq_lock, flags);

        if (req->rq_replied) {
                DEBUG_REQ(D_NET, req, "REPLIED:");
                GOTO(out, rc = 1);
        }

        if (req->rq_net_err && !req->rq_timedout) {
                spin_unlock_irqrestore(&req->rq_lock, flags);
                rc = ptlrpc_expire_one_request(req);
                spin_lock_irqsave(&req->rq_lock, flags);
                GOTO(out, rc);
        }

        if (req->rq_err) {
                DEBUG_REQ(D_ERROR, req, "ABORTED:");
                GOTO(out, rc = 1);
        }

        if (req->rq_resend) {
                DEBUG_REQ(D_ERROR, req, "RESEND:");
                GOTO(out, rc = 1);
        }

        if (req->rq_restart) {
                DEBUG_REQ(D_ERROR, req, "RESTART:");
                GOTO(out, rc = 1);
        }
        EXIT;
 out:
        spin_unlock_irqrestore(&req->rq_lock, flags);
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        return rc;
}

static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR, err == %d",
                          err);
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

static int after_reply(struct ptlrpc_request *req)
{
        unsigned long flags;
        struct obd_import *imp = req->rq_import;
        int rc;
        ENTRY;

        LASSERT(!req->rq_receiving_reply);

        /* NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order. */

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        LASSERT(req->rq_nob_received <= req->rq_replen);
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d\n", rc);
                RETURN(-EPROTO);
        }

        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)\n",
                          req->rq_repmsg->type);
                RETURN(-EPROTO);
        }

        rc = ptlrpc_check_status(req);

        /* Either we've been evicted, or the server has failed for
         * some reason. Try to reconnect, and if that fails, punt to the
         * upcall. */
        if ((rc == -ENOTCONN) || (rc == -ENODEV)) {
                if (req->rq_send_state != LUSTRE_IMP_FULL ||
                    imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
                        RETURN(-ENOTCONN);
                }

                ptlrpc_request_handle_notconn(req);

                RETURN(rc);
        }

        /* Store transno in reqmsg for replay. */
        req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;

        if (req->rq_import->imp_replayable) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                if (req->rq_transno != 0)
                        ptlrpc_retain_replayable_request(req, imp);
                else if (req->rq_commit_cb != NULL) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        req->rq_commit_cb(req);
                        spin_lock_irqsave(&imp->imp_lock, flags);
                }

                if (req->rq_transno > imp->imp_max_transno)
                        imp->imp_max_transno = req->rq_transno;

                /* Replay-enabled imports return commit-status information. */
                if (req->rq_repmsg->last_committed)
                        imp->imp_peer_committed_transno =
                                req->rq_repmsg->last_committed;
                ptlrpc_free_committed(imp);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
        }

        RETURN(rc);
}
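
/*
 * Summary of the replay bookkeeping above (descriptive note, not from the
 * original file): a reply carrying a non-zero transno means the server made
 * a persistent change, so the request is retained on imp_replay_list until
 * the server reports it committed via last_committed; a zero transno just
 * runs the commit callback immediately.  ptlrpc_free_committed() then trims
 * requests at or below imp_peer_committed_transno, keeping rq_replay ones.
 */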

static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
        struct obd_import     *imp;
        unsigned long          flags;
        int rc;
        ENTRY;

        LASSERT(req->rq_phase == RQ_PHASE_NEW);
        req->rq_phase = RQ_PHASE_RPC;

        imp = req->rq_import;
        spin_lock_irqsave(&imp->imp_lock, flags);

        req->rq_import_generation = imp->imp_generation;

        if (ptlrpc_import_delay_req(imp, req, &rc)) {
                spin_lock(&req->rq_lock);
                req->rq_waiting = 1;
                spin_unlock(&req->rq_lock);

                DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
                          "(%s != %s)",
                          req->rq_reqmsg->status,
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                LASSERT(list_empty(&req->rq_list));

                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                RETURN(0);
        }

        if (rc != 0) {
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                req->rq_status = rc;
                req->rq_phase = RQ_PHASE_INTERPRET;
                RETURN(rc);
        }

        /* XXX this is the same as ptlrpc_queue_wait */
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        req->rq_reqmsg->status = current->pid;
        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
               " %s:%s:%d:"LPU64":%s:%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
               req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               req->rq_reqmsg->opc);

        rc = ptl_send_rpc(req);
        if (rc) {
                DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
                req->rq_net_err = 1;
                RETURN(rc);
        }
        RETURN(0);
}

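/*
 * Request phases handled below (descriptive note, not from the original
 * file): NEW -> RPC (sent; possibly delayed or resent) -> BULK (only when
 * rq_bulk is set and the reply was good) -> INTERPRET (rq_interpret_reply
 * runs) -> COMPLETE.  Errors and interrupts jump straight to INTERPRET.
 */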
int ptlrpc_check_set(struct ptlrpc_request_set *set)
{
        unsigned long flags;
        struct list_head *tmp;
        int force_timer_recalc = 0;
        ENTRY;

        if (set->set_remaining == 0)
                RETURN(1);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int rc = 0;

                if (req->rq_phase == RQ_PHASE_NEW &&
                    ptlrpc_send_new_req(req)) {
                        force_timer_recalc = 1;
                }

                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }

                if (req->rq_phase == RQ_PHASE_COMPLETE)
                        continue;

                if (req->rq_phase == RQ_PHASE_INTERPRET)
                        GOTO(interpret, req->rq_status);

                if (req->rq_net_err && !req->rq_timedout)
                        ptlrpc_expire_one_request(req);

                if (req->rq_err) {
                        ptlrpc_unregister_reply(req);
                        if (req->rq_status == 0)
                                req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO(interpret, req->rq_status);
                }

                /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
                 * will only be set after rq_timedout, but the oig waiting
                 * path sets rq_intr irrespective of whether ptlrpcd has
                 * seen a timeout.  Our policy is to only interpret
                 * interrupted rpcs after they have timed out. */
                if (req->rq_intr && (req->rq_timedout || req->rq_waiting)) {
                        /* NB could be on delayed list */
                        ptlrpc_unregister_reply(req);
                        req->rq_status = -EINTR;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO(interpret, req->rq_status);
                }

                if (req->rq_phase == RQ_PHASE_RPC) {
                        if (req->rq_timedout || req->rq_waiting ||
                            req->rq_resend) {
                                int status;

                                ptlrpc_unregister_reply(req);

                                spin_lock_irqsave(&imp->imp_lock, flags);

                                if (ptlrpc_import_delay_req(imp, req,
                                                            &status)) {
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        continue;
                                }

                                list_del_init(&req->rq_list);
                                if (status != 0) {
                                        req->rq_status = status;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        GOTO(interpret, req->rq_status);
                                }
                                if (req->rq_no_resend) {
                                        req->rq_status = -ENOTCONN;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        GOTO(interpret, req->rq_status);
                                }
                                list_add_tail(&req->rq_list,
                                              &imp->imp_sending_list);

                                spin_unlock_irqrestore(&imp->imp_lock, flags);

                                req->rq_waiting = 0;
                                if (req->rq_resend) {
                                        lustre_msg_add_flags(req->rq_reqmsg,
                                                             MSG_RESENT);
                                        if (req->rq_bulk) {
                                                __u64 old_xid = req->rq_xid;

                                                ptlrpc_unregister_bulk(req);

                                                /* ensure previous bulk fails */
                                                req->rq_xid = ptlrpc_next_xid();
                                                CDEBUG(D_HA, "resend bulk "
                                                       "old x"LPU64
                                                       " new x"LPU64"\n",
                                                       old_xid, req->rq_xid);
                                        }
                                }

                                rc = ptl_send_rpc(req);
                                if (rc) {
                                        DEBUG_REQ(D_HA, req, "send failed (%d)",
                                                  rc);
                                        force_timer_recalc = 1;
                                        req->rq_net_err = 1;
                                }
                                /* need to reset the timeout */
                                force_timer_recalc = 1;
                        }

                        /* Still waiting for a reply? */
                        if (ptlrpc_client_receiving_reply(req))
                                continue;

                        /* Did we actually receive a reply? */
                        if (!ptlrpc_client_replied(req))
                                continue;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        req->rq_status = after_reply(req);
                        if (req->rq_resend) {
                                /* Add this req to the delayed list so
                                 * it can be errored if the import is
                                 * evicted after recovery. */
                                spin_lock_irqsave(&req->rq_lock, flags);
                                list_add_tail(&req->rq_list,
                                              &imp->imp_delayed_list);
                                spin_unlock_irqrestore(&req->rq_lock, flags);
                                continue;
                        }

                        /* If there is no bulk associated with this request,
                         * then we're done and should let the interpreter
                         * process the reply.  Similarly if the RPC returned
                         * an error, and therefore the bulk will never arrive.
                         */
                        if (req->rq_bulk == NULL || req->rq_status != 0) {
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                GOTO(interpret, req->rq_status);
                        }

                        req->rq_phase = RQ_PHASE_BULK;
                }

                LASSERT(req->rq_phase == RQ_PHASE_BULK);
                if (ptlrpc_bulk_active(req->rq_bulk))
                        continue;

                if (!req->rq_bulk->bd_success) {
                        /* The RPC reply arrived OK, but the bulk screwed
                         * up!  Dead weird, since the server told us the RPC
                         * was good after getting the REPLY for its GET or
                         * the ACK for its PUT. */
                        DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
                        LBUG();
                }

                req->rq_phase = RQ_PHASE_INTERPRET;

        interpret:
                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
                LASSERT(!req->rq_receiving_reply);

                ptlrpc_unregister_reply(req);
                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk(req);

                req->rq_phase = RQ_PHASE_COMPLETE;

                if (req->rq_interpret_reply != NULL) {
                        int (*interpreter)(struct ptlrpc_request *, void *,
                                           int) = req->rq_interpret_reply;
                        req->rq_status = interpreter(req, &req->rq_async_args,
                                                     req->rq_status);
                }

                CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:"
                       "opc %s:%s:%d:"LPU64":%s:%d\n", current->comm,
                       imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
                       req->rq_xid,
                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
                       req->rq_reqmsg->opc);

                set->set_remaining--;

                atomic_dec(&imp->imp_inflight);
                wake_up(&imp->imp_recovery_waitq);
        }

        /* If we hit an error, we want to recover promptly. */
        RETURN(set->set_remaining == 0 || force_timer_recalc);
}

int ptlrpc_expire_one_request(struct ptlrpc_request *req)
{
        unsigned long      flags;
        struct obd_import *imp = req->rq_import;
        ENTRY;

        DEBUG_REQ(D_ERROR, req, "timeout (sent at %lu, %lus ago)",
                  (long)req->rq_sent, CURRENT_SECONDS - req->rq_sent);

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_timedout = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);

        ptlrpc_unregister_reply(req);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();

        if (req->rq_bulk != NULL)
                ptlrpc_unregister_bulk(req);

        if (imp == NULL) {
                DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
                RETURN(1);
        }

        /* The DLM server doesn't want recovery run on its imports. */
        if (imp->imp_dlm_fake)
                RETURN(1);

        /* If this request is for recovery or other primordial tasks,
         * then error it out here. */
        if (req->rq_send_state != LUSTRE_IMP_FULL ||
            imp->imp_obd->obd_no_recov) {
                spin_lock_irqsave(&req->rq_lock, flags);
                req->rq_status = -ETIMEDOUT;
                req->rq_err = 1;
                spin_unlock_irqrestore(&req->rq_lock, flags);
                RETURN(1);
        }

        ptlrpc_fail_import(imp, req->rq_import_generation);

        RETURN(0);
}

int ptlrpc_expired_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head          *tmp;
        time_t                     now = CURRENT_SECONDS;
        ENTRY;

        LASSERT(set != NULL);

        /* A timeout expired; see which reqs it applies to... */
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                /* request in-flight? */
                if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting &&
                       !req->rq_resend) ||
                      (req->rq_phase == RQ_PHASE_BULK)))
                        continue;

                if (req->rq_timedout ||           /* already dealt with */
                    req->rq_sent + req->rq_timeout > now) /* not expired */
                        continue;

                /* deal with this guy */
                ptlrpc_expire_one_request(req);
        }

        /* When waiting for a whole set, we always want to break out of the
         * sleep so we can recalculate the timeout, or enable interrupts
         * if everyone has timed out.
         */
        RETURN(1);
}

void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
        unsigned long flags;

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_intr = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);
}

void ptlrpc_interrupted_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;

        LASSERT(set != NULL);
        CERROR("INTERRUPTED SET %p\n", set);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                if (req->rq_phase != RQ_PHASE_RPC)
                        continue;

                ptlrpc_mark_interrupted(req);
        }
}

int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
        struct list_head      *tmp;
        time_t                 now = CURRENT_SECONDS;
        time_t                 deadline;
        int                    timeout = 0;
        struct ptlrpc_request *req;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */

        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                /* request in-flight? */
                if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
                      (req->rq_phase == RQ_PHASE_BULK)))
                        continue;

                if (req->rq_timedout)   /* already timed out */
                        continue;

                deadline = req->rq_sent + req->rq_timeout;
                if (deadline <= now)    /* actually expired already */
                        timeout = 1;    /* ASAP */
                else if (timeout == 0 || timeout > deadline - now)
                        timeout = deadline - now;
        }
        RETURN(timeout);
}
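
/*
 * Worked example (illustrative values, not from the original file): with
 * now == 1000 and two in-flight requests sent at 990 and 998, each with
 * rq_timeout == 20, the deadlines are 1010 and 1018 and the loop returns
 * min(10, 18) == 10 seconds.  An already-expired request yields 1 (ASAP)
 * rather than 0, since 0 means no pending deadlines; ptlrpc_set_wait()
 * maps 0 to a one-second sleep in any case.
 */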

int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
        struct list_head      *tmp;
        struct ptlrpc_request *req;
        struct l_wait_info     lwi;
        int                    rc, timeout;
        ENTRY;

        LASSERT(!list_empty(&set->set_requests));
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                if (req->rq_phase == RQ_PHASE_NEW)
                        (void)ptlrpc_send_new_req(req);
        }

        do {
                timeout = ptlrpc_set_next_timeout(set);

                /* wait until all complete, interrupted, or an in-flight
                 * req times out */
                CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
                       set, timeout);
                lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
                                       ptlrpc_expired_set,
                                       ptlrpc_interrupted_set, set);
                rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);

                LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

                /* -EINTR => all requests have been flagged rq_intr so next
                 * check completes.
                 * -ETIMEDOUT => someone timed out.  When all reqs have
                 * timed out, signals are enabled, allowing completion with
                 * EINTR.
                 * I don't really care if we go once more round the loop in
                 * the error cases -eeb. */
        } while (rc != 0 || set->set_remaining != 0);

        LASSERT(set->set_remaining == 0);

        rc = 0;
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
                if (req->rq_status != 0)
                        rc = req->rq_status;
        }

        if (set->set_interpret != NULL) {
                int (*interpreter)(struct ptlrpc_request_set *, void *, int) =
                        set->set_interpret;
                rc = interpreter(set, set->set_arg, rc);
        }

        RETURN(rc);
}

static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
        struct ptlrpc_request_pool *pool = request->rq_pool;

        spin_lock(&pool->prp_lock);
        list_add_tail(&request->rq_list, &pool->prp_req_list);
        spin_unlock(&pool->prp_lock);
}

static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
        LASSERTF(request->rq_rqbd == NULL, "req %p\n", request); /* client-side */
        LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
        LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);

        /* We must take it off the imp_replay_list first.  Otherwise, we'll set
         * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
        if (request->rq_import != NULL) {
                unsigned long flags = 0;
                if (!locked)
                        spin_lock_irqsave(&request->rq_import->imp_lock, flags);
                list_del_init(&request->rq_replay_list);
                if (!locked)
                        spin_unlock_irqrestore(&request->rq_import->imp_lock,
                                               flags);
        }
        LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);

        if (atomic_read(&request->rq_refcount) != 0) {
                DEBUG_REQ(D_ERROR, request,
                          "freeing request with nonzero refcount");
                LBUG();
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
        }
        if (request->rq_export != NULL) {
                class_export_put(request->rq_export);
                request->rq_export = NULL;
        }
        if (request->rq_import != NULL) {
                class_import_put(request->rq_import);
                request->rq_import = NULL;
        }
        if (request->rq_bulk != NULL)
                ptlrpc_free_bulk(request->rq_bulk);

        if (request->rq_pool) {
                __ptlrpc_free_req_to_pool(request);
        } else {
                if (request->rq_reqmsg != NULL) {
                        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                        request->rq_reqmsg = NULL;
                }
                OBD_FREE(request, sizeof(*request));
        }
        EXIT;
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        __ptlrpc_free_req(request, 0);
}

static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
        LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
        (void)__ptlrpc_req_finished(request, 1);
}

static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL)
                RETURN(1);

        if (request == LP_POISON ||
            request->rq_reqmsg == LP_POISON) {
                CERROR("dereferencing freed request (bug 575)\n");
                LBUG();
                RETURN(1);
        }

        DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);

        if (atomic_dec_and_test(&request->rq_refcount)) {
                __ptlrpc_free_req(request, locked);
                RETURN(1);
        }

        RETURN(0);
}

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        __ptlrpc_req_finished(request, 0);
}

__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
        return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);

/* Disengage the client's reply buffer from the network
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call...
 */
void ptlrpc_unregister_reply(struct ptlrpc_request *request)
{
        int                rc;
        wait_queue_head_t *wq;
        struct l_wait_info lwi;

        LASSERT(!in_interrupt());              /* might sleep */

        if (!ptlrpc_client_receiving_reply(request))
                return;

        LNetMDUnlink(request->rq_reply_md_h);

        /* We have to l_wait_event() whatever the result, to give liblustre
         * a chance to run reply_in_callback() */

        if (request->rq_set != NULL)
                wq = &request->rq_set->set_waitq;
        else
                wq = &request->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
                rc = l_wait_event(*wq,
                                  !ptlrpc_client_receiving_reply(request),
                                  &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout");
        }
}

/* caller must hold imp->imp_lock */
void ptlrpc_free_committed(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
        ENTRY;

        LASSERT(imp != NULL);

        LASSERT_SPIN_LOCKED(&imp->imp_lock);

        CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
               imp->imp_obd->obd_name, imp->imp_peer_committed_transno);

        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);

                /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
                LASSERT(req != last_req);
                last_req = req;

                if (req->rq_import_generation < imp->imp_generation) {
                        DEBUG_REQ(D_HA, req, "freeing request with old gen");
                        GOTO(free_req, 0);
                }

                if (req->rq_replay) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > imp->imp_peer_committed_transno) {
                        DEBUG_REQ(D_HA, req, "stopping search");
                        break;
                }

                DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
                          imp->imp_peer_committed_transno);
free_req:
                if (req->rq_commit_cb != NULL)
                        req->rq_commit_cb(req);
                list_del_init(&req->rq_replay_list);
                __ptlrpc_req_finished(req, 1);
        }

        EXIT;
        return;
}

void ptlrpc_cleanup_client(struct obd_import *imp)
{
        ENTRY;
        EXIT;
        return;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "going to resend");
        req->rq_reqmsg->handle.cookie = 0;
        req->rq_status = -EAGAIN;

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_resend = 1;
        req->rq_net_err = 0;
        req->rq_timedout = 0;
        if (req->rq_bulk) {
                __u64 old_xid = req->rq_xid;

                /* ensure previous bulk fails */
                req->rq_xid = ptlrpc_next_xid();
                CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
                       old_xid, req->rq_xid);
        }
        ptlrpc_wake_client_req(req);
        spin_unlock_irqrestore(&req->rq_lock, flags);
}

/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
        req->rq_status = -ERESTARTSYS;

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_restart = 1;
        req->rq_timedout = 0;
        ptlrpc_wake_client_req(req);
        spin_unlock_irqrestore(&req->rq_lock, flags);
}

static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        RETURN(ptlrpc_expire_one_request(req));
}

static void interrupted_request(void *data)
{
        unsigned long flags;
        struct ptlrpc_request *req = data;

        DEBUG_REQ(D_HA, req, "request interrupted");
        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_intr = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);
}
1403
1404 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1405 {
1406         ENTRY;
1407         atomic_inc(&req->rq_refcount);
1408         RETURN(req);
1409 }

void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp)
{
        struct list_head *tmp;

        LASSERT_SPIN_LOCKED(&imp->imp_lock);

        /* Clear this for new requests that are resent as well as for
         * resent replayed requests. */
        lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);

        /* don't re-add requests that have been replayed */
        if (!list_empty(&req->rq_replay_list))
                return;

        lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);

        LASSERT(imp->imp_replayable);
        /* Balanced in ptlrpc_free_committed, usually. */
        ptlrpc_request_addref(req);

        /* Keep imp_replay_list sorted in ascending transno order (xid
         * breaks ties): scan back from the tail for the first entry not
         * greater than the new request and insert after it. */
        list_for_each_prev(tmp, &imp->imp_replay_list) {
                struct ptlrpc_request *iter =
                        list_entry(tmp, struct ptlrpc_request, rq_replay_list);

                /* We may have duplicate transnos if we create and then
                 * open a file, or for closes retained to match creating
                 * opens, so use req->rq_xid as a secondary key.
                 * (See bugs 684, 685, and 428.)
                 * XXX no longer needed, but all opens need transnos!
                 */
                if (iter->rq_transno > req->rq_transno)
                        continue;

                if (iter->rq_transno == req->rq_transno) {
                        LASSERT(iter->rq_xid != req->rq_xid);
                        if (iter->rq_xid > req->rq_xid)
                                continue;
                }

                list_add(&req->rq_replay_list, &iter->rq_replay_list);
                return;
        }

        /* Nothing smaller was found: this request sorts before everything
         * already on the list (usually the list was simply empty), so it
         * belongs at the front. */
        list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
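
/*
 * Worked example of the resulting order (the transno/xid pairs are made
 * up for illustration): inserting (transno 7, xid 12) into a list
 * holding (5,3) (7,9) (8,2) scans back past (8,2), stops at (7,9)
 * because the transnos tie and xid 9 < 12, and leaves
 * (5,3) (7,9) (7,12) (8,2): ascending by transno, xid breaking ties.
 */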

int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        int brc;
        struct l_wait_info lwi;
        struct obd_import *imp = req->rq_import;
        unsigned long flags;
        int timeout = 0;
        ENTRY;

        LASSERT(req->rq_set == NULL);
        LASSERT(!req->rq_receiving_reply);
        atomic_inc(&imp->imp_inflight);

        /* for distributed debugging */
        req->rq_reqmsg->status = current->pid;
        LASSERT(imp->imp_obd != NULL);
        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc "
               "%s:%s:%d:"LPU64":%s:%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               req->rq_reqmsg->opc);

        /* Mark phase here for a little debug help */
        req->rq_phase = RQ_PHASE_RPC;

        spin_lock_irqsave(&imp->imp_lock, flags);
        req->rq_import_generation = imp->imp_generation;
restart:
        if (ptlrpc_import_delay_req(imp, req, &rc)) {
                /* The import is not in the right state to send this
                 * request yet; park it on imp_delayed_list until
                 * recovery catches up. */
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
                          current->comm,
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                lwi = LWI_INTR(interrupted_request, req);
                rc = l_wait_event(req->rq_reply_waitq,
                                  (req->rq_send_state == imp->imp_state ||
                                   req->rq_err || req->rq_intr),
                                  &lwi);
                DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d/%d == 1)",
                          current->comm,
                          ptlrpc_import_state_name(imp->imp_state),
                          ptlrpc_import_state_name(req->rq_send_state),
                          req->rq_err, req->rq_intr);

                spin_lock_irqsave(&imp->imp_lock, flags);
                list_del_init(&req->rq_list);

                if (req->rq_err) {
                        rc = -EIO;
                } else if (req->rq_intr) {
                        rc = -EINTR;
                } else if (req->rq_no_resend) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc = -ETIMEDOUT);
                } else {
                        GOTO(restart, rc);
                }
        }

        if (rc != 0) {
                list_del_init(&req->rq_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                req->rq_status = rc; /* XXX is this ok? */
                GOTO(out, rc);
        }

        if (req->rq_resend) {
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

                if (req->rq_bulk != NULL) {
                        ptlrpc_unregister_bulk(req);

                        /* bulk requests are supposed to be
                         * idempotent, so we are free to bump the xid
                         * here, which we need to do before
                         * registering the bulk again (bug 6371).
                         * print the old xid first for sanity.
                         */
                        DEBUG_REQ(D_HA, req, "bumping xid for bulk: ");
                        req->rq_xid = ptlrpc_next_xid();
                }

                DEBUG_REQ(D_HA, req, "resending: ");
        }

        /* XXX this is the same as ptlrpc_set_wait */
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        rc = ptl_send_rpc(req);
        if (rc) {
                /* The send failed; arm the shortest possible timeout so
                 * the wait below expires almost immediately and we fall
                 * into the resend path. */
                DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
                timeout = 1;
        } else {
                timeout = MAX(req->rq_timeout * HZ, 1);
                DEBUG_REQ(D_NET, req, "-- sleeping for %d jiffies", timeout);
        }
        lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
                               req);
        l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi);
        DEBUG_REQ(D_NET, req, "-- done sleeping");

        CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:opc "
               "%s:%s:%d:"LPU64":%s:%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               req->rq_reqmsg->opc);

        spin_lock_irqsave(&imp->imp_lock, flags);
        list_del_init(&req->rq_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        /* If the reply was received normally, this just grabs the spinlock
         * (ensuring the reply callback has returned), sees that
         * req->rq_receiving_reply is clear and returns. */
        ptlrpc_unregister_reply(req);

        if (req->rq_err)
                GOTO(out, rc = -EIO);

        /* Resend if we need to, unless we were interrupted. */
        if (req->rq_resend && !req->rq_intr) {
                /* ...unless we were specifically told otherwise. */
                if (req->rq_no_resend)
                        GOTO(out, rc = -ETIMEDOUT);
                spin_lock_irqsave(&imp->imp_lock, flags);
                goto restart;
        }

        if (req->rq_intr) {
                /* Should only be interrupted if we timed out. */
                if (!req->rq_timedout)
                        DEBUG_REQ(D_ERROR, req,
                                  "rq_intr set but rq_timedout not");
                GOTO(out, rc = -EINTR);
        }

        /* non-recoverable timeout */
        if (req->rq_timedout)
                GOTO(out, rc = -ETIMEDOUT);

        if (!req->rq_replied) {
                /* How can this be? -eeb */
                DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
                LBUG();
                GOTO(out, rc = req->rq_status);
        }

        rc = after_reply(req);
        /* NB may return +ve success rc */
        if (req->rq_resend) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                goto restart;
        }

 out:
        if (req->rq_bulk != NULL) {
                if (rc >= 0) {
                        /* success so far.  Note that anything going wrong
                         * with bulk now is EXTREMELY strange, since the
                         * server must have believed that the bulk
                         * transferred OK before she replied with success
                         * to me. */
                        lwi = LWI_TIMEOUT(timeout, NULL, NULL);
                        brc = l_wait_event(req->rq_reply_waitq,
                                           !ptlrpc_bulk_active(req->rq_bulk),
                                           &lwi);
                        LASSERT(brc == 0 || brc == -ETIMEDOUT);
                        if (brc != 0) {
                                LASSERT(brc == -ETIMEDOUT);
                                DEBUG_REQ(D_ERROR, req, "bulk timed out");
                                rc = brc;
                        } else if (!req->rq_bulk->bd_success) {
                                DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
                                rc = -EIO;
                        }
                }
                if (rc < 0)
                        ptlrpc_unregister_bulk(req);
        }

        LASSERT(!req->rq_receiving_reply);
        req->rq_phase = RQ_PHASE_INTERPRET;

        atomic_dec(&imp->imp_inflight);
        wake_up(&imp->imp_recovery_waitq);
        RETURN(rc);
}
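
/*
 * A minimal synchronous-caller sketch.  The opcode and (empty) buffer
 * layout below are illustrative only; see ptlrpc_ping() and other real
 * callers for the genuine article:
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, LUSTRE_OBD_VERSION, OBD_PING,
 *                            0, NULL, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      req->rq_replen = lustre_msg_size(0, NULL);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 *
 * ptlrpc_queue_wait() sleeps until the reply (or an unrecoverable
 * error) arrives, resending and waiting out recovery as needed, so it
 * must only be called from a context that can block.
 */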

struct ptlrpc_replay_async_args {
        int praa_old_state;
        int praa_old_status;
};

static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
                                   void *data, int rc)
{
        struct ptlrpc_replay_async_args *aa = data;
        struct obd_import *imp = req->rq_import;
        unsigned long flags;

        atomic_dec(&imp->imp_replay_inflight);

        if (!req->rq_replied) {
                CERROR("request replay timed out, restarting recovery\n");
                GOTO(out, rc = -ETIMEDOUT);
        }

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        LASSERT(req->rq_nob_received <= req->rq_replen);
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
                GOTO(out, rc = -EPROTO);
        }

        if (req->rq_repmsg->type == PTL_RPC_MSG_ERR &&
            req->rq_repmsg->status == -ENOTCONN)
                GOTO(out, rc = req->rq_repmsg->status);

        /* The transno had better not change over replay. */
        LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);

        DEBUG_REQ(D_HA, req, "got rep");

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if (req->rq_replied && req->rq_repmsg->status != aa->praa_old_status) {
                DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
                          req->rq_repmsg->status, aa->praa_old_status);
        } else {
                /* Put it back for re-replay. */
                req->rq_repmsg->status = aa->praa_old_status;
        }

        spin_lock_irqsave(&imp->imp_lock, flags);
        imp->imp_last_replay_transno = req->rq_transno;
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        /* continue with recovery */
        rc = ptlrpc_import_recovery_state_machine(imp);
 out:
        req->rq_send_state = aa->praa_old_state;

        if (rc != 0)
                /* this replay failed, so restart recovery */
                ptlrpc_connect_import(imp, NULL);

        RETURN(rc);
}

int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        struct ptlrpc_replay_async_args *aa;
        ENTRY;

        LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);

        /* Not handling automatic bulk replay yet (or ever?) */
        LASSERT(req->rq_bulk == NULL);

        DEBUG_REQ(D_HA, req, "REPLAY");

        LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct ptlrpc_replay_async_args *)&req->rq_async_args;
        memset(aa, 0, sizeof(*aa));

        /* Prepare request to be resent with ptlrpcd */
        aa->praa_old_state = req->rq_send_state;
        req->rq_send_state = LUSTRE_IMP_REPLAY;
        req->rq_phase = RQ_PHASE_NEW;
        aa->praa_old_status = req->rq_repmsg->status;
        req->rq_status = 0;

        req->rq_interpret_reply = ptlrpc_replay_interpret;
        atomic_inc(&req->rq_import->imp_replay_inflight);
        ptlrpc_request_addref(req); /* ptlrpcd needs a ref */

        ptlrpcd_add_req(req);
        RETURN(0);
}
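
/*
 * Sketch of the ptlrpcd handoff (assumed flow, matching how
 * rq_interpret_reply is invoked from the request-set processing code):
 * once the replay reply arrives, ptlrpcd's completion path does roughly
 *
 *      req->rq_status = req->rq_interpret_reply(req, &req->rq_async_args,
 *                                               req->rq_status);
 *
 * which lands in ptlrpc_replay_interpret() above with the state that
 * ptlrpc_replay_req() stashed in rq_async_args.
 */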

void ptlrpc_abort_inflight(struct obd_import *imp)
{
        unsigned long flags;
        struct list_head *tmp, *n;
        ENTRY;

        /* Make sure that no new requests get processed for this import.
         * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
         * this flag and then putting requests on sending_list or delayed_list.
         */
        spin_lock_irqsave(&imp->imp_lock, flags);

        /* XXX locking?  Maybe we should remove each request with the list
         * locked?  Also, how do we know if the requests on the list are
         * being freed at this time?
         */
        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "inflight");

                spin_lock(&req->rq_lock);
                if (req->rq_import_generation < imp->imp_generation) {
                        req->rq_err = 1;
                        ptlrpc_wake_client_req(req);
                }
                spin_unlock(&req->rq_lock);
        }

        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "aborting waiting req");

                spin_lock(&req->rq_lock);
                if (req->rq_import_generation < imp->imp_generation) {
                        req->rq_err = 1;
                        ptlrpc_wake_client_req(req);
                }
                spin_unlock(&req->rq_lock);
        }

        /* Last chance to free reqs left on the replay list, but we
         * will still leak reqs that haven't committed.  */
        if (imp->imp_replayable)
                ptlrpc_free_committed(imp);

        spin_unlock_irqrestore(&imp->imp_lock, flags);

        EXIT;
}

static __u64 ptlrpc_last_xid = 0;
static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;

/* Allocate the next request xid; xids are unique and monotonically
 * increasing for the life of this client. */
__u64 ptlrpc_next_xid(void)
{
        __u64 tmp;

        spin_lock(&ptlrpc_last_xid_lock);
        tmp = ++ptlrpc_last_xid;
        spin_unlock(&ptlrpc_last_xid_lock);
        return tmp;
}

/* Report the xid that the next call to ptlrpc_next_xid() would return,
 * without consuming it. */
__u64 ptlrpc_sample_next_xid(void)
{
        __u64 tmp;

        spin_lock(&ptlrpc_last_xid_lock);
        tmp = ptlrpc_last_xid + 1;
        spin_unlock(&ptlrpc_last_xid_lock);
        return tmp;
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
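
/*
 * Usage sketch for the pair above: ptlrpc_next_xid() consumes an xid
 * (as the bulk-resend paths earlier in this file do), while
 * ptlrpc_sample_next_xid() only peeks.  The LASSERT below is
 * illustrative and holds only if no other thread allocates an xid in
 * between:
 *
 *      __u64 peek = ptlrpc_sample_next_xid();  (no xid consumed)
 *      req->rq_xid = ptlrpc_next_xid();        (xid consumed)
 *      LASSERT(req->rq_xid == peek);           (single-threaded only)
 */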