lustre/ptlrpc/client.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <errno.h>
26 #include <signal.h>
27 #include <liblustre.h>
28 #endif
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/lustre_lib.h>
33 #include <linux/lustre_ha.h>
34 #include <linux/lustre_import.h>
35
36 #include "ptlrpc_internal.h"
37
38 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
39                         struct ptlrpc_client *cl)
40 {
41         cl->cli_request_portal = req_portal;
42         cl->cli_reply_portal   = rep_portal;
43         cl->cli_name           = name;
44 }
45
46 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
47 {
48         struct ptlrpc_connection *c;
49         struct ptlrpc_peer peer;
50         int err;
51
52         err = ptlrpc_uuid_to_peer(uuid, &peer);
53         if (err != 0) {
54                 CERROR("cannot find peer %s!\n", uuid->uuid);
55                 return NULL;
56         }
57
58         c = ptlrpc_get_connection(&peer, uuid);
59         if (c) {
60                 memcpy(c->c_remote_uuid.uuid,
61                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
62         }
63
64         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
65
66         return c;
67 }
68
69 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
70                                  struct obd_uuid *uuid)
71 {
72         struct ptlrpc_peer peer;
73         int err;
74
75         err = ptlrpc_uuid_to_peer(uuid, &peer);
76         if (err != 0) {
77                 CERROR("cannot find peer %s!\n", uuid->uuid);
78                 return;
79         }
80
81         memcpy(&conn->c_peer, &peer, sizeof (peer));
82         return;
83 }
84
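/* Allocate a bulk descriptor with room for npages iov entries; the pages
 * themselves are attached later, one at a time, via ptlrpc_prep_bulk_page(). */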
85 static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
86 {
87         struct ptlrpc_bulk_desc *desc;
88
89         OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages]));
90         if (!desc)
91                 return NULL;
92
93         spin_lock_init(&desc->bd_lock);
94         init_waitqueue_head(&desc->bd_waitq);
95         desc->bd_max_iov = npages;
96         desc->bd_iov_count = 0;
97         desc->bd_md_h = PTL_INVALID_HANDLE;
98         desc->bd_portal = portal;
99         desc->bd_type = type;
100         
101         return desc;
102 }
103
104 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
105                                                int npages, int type, int portal)
106 {
107         struct obd_import *imp = req->rq_import;
108         struct ptlrpc_bulk_desc *desc;
109
110         LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
111         desc = new_bulk(npages, type, portal);
112         if (desc == NULL)
113                 RETURN(NULL);
114
115         desc->bd_import_generation = req->rq_import_generation;
116         desc->bd_import = class_import_get(imp);
117         desc->bd_req = req;
118
119         desc->bd_cbid.cbid_fn  = client_bulk_callback;
120         desc->bd_cbid.cbid_arg = desc;
121
122         /* This makes req own desc; it is freed when req itself is freed */
123         req->rq_bulk = desc;
124
125         return desc;
126 }
127
128 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
129                                                int npages, int type, int portal)
130 {
131         struct obd_export *exp = req->rq_export;
132         struct ptlrpc_bulk_desc *desc;
133
134         LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
135
136         desc = new_bulk(npages, type, portal);
137         if (desc == NULL)
138                 RETURN(NULL);
139
140         desc->bd_export = class_export_get(exp);
141         desc->bd_req = req;
142
143         desc->bd_cbid.cbid_fn  = server_bulk_callback;
144         desc->bd_cbid.cbid_arg = desc;
145
146         /* NB we don't assign rq_bulk here; server-side requests are
147          * re-used, and the handler frees the bulk desc explicitly. */
148
149         return desc;
150 }
151
152 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
153                            struct page *page, int pageoffset, int len)
154 {
155         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
156         LASSERT(page != NULL);
157         LASSERT(pageoffset >= 0);
158         LASSERT(len > 0);
159         LASSERT(pageoffset + len <= PAGE_SIZE);
160
161         desc->bd_nob += len;
162
163         pers_bulk_add_page(desc, page, pageoffset, len);
164 }
165
166 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
167 {
168         ENTRY;
169
170         LASSERT(desc != NULL);
171         LASSERT(desc->bd_iov_count != 0x5a5a5a5a); /* not freed already */
172         LASSERT(!desc->bd_network_rw);         /* network hands off or */
173         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
174         if (desc->bd_export)
175                 class_export_put(desc->bd_export);
176         else
177                 class_import_put(desc->bd_import);
178
179         OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, 
180                                 bd_iov[desc->bd_max_iov]));
181         EXIT;
182 }
183
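/* Allocate and pack a new request on the given import: takes a reference on
 * imp, assigns a fresh xid and the import's request/reply portals, and leaves
 * the request in RQ_PHASE_NEW with a refcount of 1. */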
184 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
185                                        int count, int *lengths, char **bufs)
186 {
187         struct ptlrpc_request *request;
188         int rc;
189         ENTRY;
190
191         LASSERT((unsigned long)imp > 0x1000);
192
193         OBD_ALLOC(request, sizeof(*request));
194         if (!request) {
195                 CERROR("request allocation out of memory\n");
196                 RETURN(NULL);
197         }
198
199         rc = lustre_pack_request(request, count, lengths, bufs);
200         if (rc) {
201                 CERROR("cannot pack request %d\n", rc);
202                 OBD_FREE(request, sizeof(*request));
203                 RETURN(NULL);
204         }
205
206         if (imp->imp_server_timeout)
207                 request->rq_timeout = obd_timeout / 2;
208         else
209                 request->rq_timeout = obd_timeout;
210         request->rq_send_state = LUSTRE_IMP_FULL;
211         request->rq_type = PTL_RPC_MSG_REQUEST;
212         request->rq_import = class_import_get(imp);
213
214         request->rq_req_cbid.cbid_fn  = request_out_callback;
215         request->rq_req_cbid.cbid_arg = request;
216
217         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
218         request->rq_reply_cbid.cbid_arg = request;
219         
220         request->rq_phase = RQ_PHASE_NEW;
221
222         /* XXX FIXME bug 249 */
223         request->rq_request_portal = imp->imp_client->cli_request_portal;
224         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
225
226         spin_lock_init(&request->rq_lock);
227         INIT_LIST_HEAD(&request->rq_list);
228         INIT_LIST_HEAD(&request->rq_replay_list);
229         INIT_LIST_HEAD(&request->rq_set_chain);
230         init_waitqueue_head(&request->rq_reply_waitq);
231         request->rq_xid = ptlrpc_next_xid();
232         atomic_set(&request->rq_refcount, 1);
233
234         request->rq_reqmsg->opc = opcode;
235         request->rq_reqmsg->flags = 0;
236
237         RETURN(request);
238 }
239
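/* Allocate an empty request set.  A rough sketch of how the set API below is
 * typically driven (not code from this file; imp, MY_OPCODE, size and
 * my_interpret are hypothetical and error handling is omitted):
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *      struct ptlrpc_request *req =
 *              ptlrpc_prep_req(imp, MY_OPCODE, 1, &size, NULL);
 *      req->rq_interpret_reply = my_interpret;
 *      ptlrpc_set_add_req(set, req);       set takes over the req reference
 *      rc = ptlrpc_set_wait(set);          sends everything, waits for completion
 *      ptlrpc_set_destroy(set);            drops the request references
 */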
240 struct ptlrpc_request_set *ptlrpc_prep_set(void)
241 {
242         struct ptlrpc_request_set *set;
243
244         OBD_ALLOC(set, sizeof *set);
245         if (!set)
246                 RETURN(NULL);
247         INIT_LIST_HEAD(&set->set_requests);
248         init_waitqueue_head(&set->set_waitq);
249         set->set_remaining = 0;
250         spin_lock_init(&set->set_new_req_lock);
251         INIT_LIST_HEAD(&set->set_new_requests);
252
253         RETURN(set);
254 }
255
256 /* Finish with this set; opposite of prep_set. */
257 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
258 {
259         struct list_head *tmp;
260         struct list_head *next;
261         int               expected_phase;
262         int               n = 0;
263         ENTRY;
264
265         /* Requests on the set should either all be completed, or all be new */
266         expected_phase = (set->set_remaining == 0) ?
267                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
268         list_for_each (tmp, &set->set_requests) {
269                 struct ptlrpc_request *req =
270                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
271
272                 LASSERT(req->rq_phase == expected_phase);
273                 n++;
274         }
275
276         LASSERT(set->set_remaining == 0 || set->set_remaining == n);
277
278         list_for_each_safe(tmp, next, &set->set_requests) {
279                 struct ptlrpc_request *req =
280                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
281                 list_del_init(&req->rq_set_chain);
282
283                 LASSERT(req->rq_phase == expected_phase);
284
285                 if (req->rq_phase == RQ_PHASE_NEW) {
286
287                         if (req->rq_interpret_reply != NULL) {
288                                 int (*interpreter)(struct ptlrpc_request *,
289                                                    void *, int) =
290                                         req->rq_interpret_reply;
291
292                                 /* higher level (i.e. LOV) failed;
293                                  * let the sub reqs clean up */
294                                 req->rq_status = -EBADR;
295                                 interpreter(req, &req->rq_async_args,
296                                             req->rq_status);
297                         }
298                         set->set_remaining--;
299                 }
300
301                 req->rq_set = NULL;
302                 ptlrpc_req_finished (req);
303         }
304
305         LASSERT(set->set_remaining == 0);
306
307         OBD_FREE(set, sizeof(*set));
308         EXIT;
309 }
310
311 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
312                         struct ptlrpc_request *req)
313 {
314         /* The set takes over the caller's request reference */
315         list_add_tail(&req->rq_set_chain, &set->set_requests);
316         req->rq_set = set;
317         set->set_remaining++;
318         atomic_inc(&req->rq_import->imp_inflight);
319 }
320
321 /* Locked so that many callers can add things; the context that owns the set
322  * is supposed to notice these and move them into the set proper. */
323 void ptlrpc_set_add_new_req(struct ptlrpc_request_set *set,
324                             struct ptlrpc_request *req)
325 {
326         unsigned long flags;
327         spin_lock_irqsave(&set->set_new_req_lock, flags);
328         /* The set takes over the caller's request reference */
329         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
330         req->rq_set = set;
331         spin_unlock_irqrestore(&set->set_new_req_lock, flags);
332 }
333
334 /*
335  * Based on the current state of the import, determine if the request
336  * can be sent, is an error, or should be delayed.
337  *
338  * Returns true if this request should be delayed. If false, and
339  * *status is set, then the request cannot be sent and *status is the
340  * error code.  If false and *status is 0, then the request can be sent.
341  *
342  * The imp->imp_lock must be held.
343  */
344 static int ptlrpc_import_delay_req(struct obd_import *imp, 
345                                    struct ptlrpc_request *req, int *status)
346 {
347         int delay = 0;
348         ENTRY;
349
350         LASSERT (status != NULL);
351         *status = 0;
352
353         if (imp->imp_state == LUSTRE_IMP_NEW) {
354                 DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
355                 *status = -EIO;
356                 LBUG();
357         }
358         else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
359                 DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
360                 *status = -EIO;
361         }
362         /* allow CONNECT even if import is invalid */
363         else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
364                  imp->imp_state == LUSTRE_IMP_CONNECTING) {
365                 ;
366         }
367         /*
368          * If the import has been invalidated (such as by an OST failure), the
369          * request must fail with -EIO.  
370          */
371         else if (imp->imp_invalid) {
372                 DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
373                 *status = -EIO;
374         } 
375         else if (req->rq_import_generation != imp->imp_generation) {
376                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
377                 *status = -EIO;
378         } 
379         else if (req->rq_send_state != imp->imp_state) {
380                 if (imp->imp_obd->obd_no_recov || imp->imp_dlm_fake 
381                     || req->rq_no_delay) 
382                         *status = -EWOULDBLOCK;
383                 else
384                         delay = 1;
385         }
386
387         RETURN(delay);
388 }
389
390 static int ptlrpc_check_reply(struct ptlrpc_request *req)
391 {
392         unsigned long flags;
393         int rc = 0;
394         ENTRY;
395
396         /* serialise with network callback */
397         spin_lock_irqsave (&req->rq_lock, flags);
398
399         if (req->rq_replied) {
400                 DEBUG_REQ(D_NET, req, "REPLIED:");
401                 GOTO(out, rc = 1);
402         }
403
404         if (req->rq_err) {
405                 DEBUG_REQ(D_ERROR, req, "ABORTED:");
406                 GOTO(out, rc = 1);
407         }
408
409         if (req->rq_resend) {
410                 DEBUG_REQ(D_ERROR, req, "RESEND:");
411                 GOTO(out, rc = 1);
412         }
413
414         if (req->rq_restart) {
415                 DEBUG_REQ(D_ERROR, req, "RESTART:");
416                 GOTO(out, rc = 1);
417         }
418         EXIT;
419  out:
420         spin_unlock_irqrestore (&req->rq_lock, flags);
421         DEBUG_REQ(D_NET, req, "rc = %d for", rc);
422         return rc;
423 }
424
425 static int ptlrpc_check_status(struct ptlrpc_request *req)
426 {
427         int err;
428         ENTRY;
429
430         err = req->rq_repmsg->status;
431         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
432                 DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR, err == %d", 
433                           err);
434                 RETURN(err < 0 ? err : -EINVAL);
435         }
436
437         if (err < 0) {
438                 DEBUG_REQ(D_INFO, req, "status is %d", err);
439         } else if (err > 0) {
440                 /* XXX: translate this error from net to host */
441                 DEBUG_REQ(D_INFO, req, "status is %d", err);
442         }
443
444         RETURN(err);
445 }
446
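/* Called once a reply has arrived: unpacks the reply message (clearing the
 * swab mask under SWAB_PARANOIA), validates its type, records the transno for
 * replay, checks the returned status, handles -ENOTCONN by triggering
 * reconnection, and updates the replay/commit bookkeeping on replayable
 * imports. */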
447 static int after_reply(struct ptlrpc_request *req)
448 {
449         unsigned long flags;
450         struct obd_import *imp = req->rq_import;
451         int rc;
452         ENTRY;
453
454         LASSERT(!req->rq_receiving_reply);
455
456         /* NB Until this point, the whole of the incoming message,
457          * including buflens, status etc is in the sender's byte order. */
458
459 #if SWAB_PARANOIA
460         /* Clear reply swab mask; this is a new reply in sender's byte order */
461         req->rq_rep_swab_mask = 0;
462 #endif
463         LASSERT (req->rq_nob_received <= req->rq_replen);
464         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
465         if (rc) {
466                 CERROR("unpack_rep failed: %d\n", rc);
467                 RETURN(-EPROTO);
468         }
469
470         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
471             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
472                 CERROR("invalid packet type received (type=%u)\n",
473                        req->rq_repmsg->type);
474                 RETURN(-EPROTO);
475         }
476
477         /* Store transno in reqmsg for replay. */
478         req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;
479
480         rc = ptlrpc_check_status(req);
481
482         /* Either we've been evicted, or the server has failed for
483          * some reason. Try to reconnect, and if that fails, punt to the
484          * upcall. */
485         if (rc == -ENOTCONN) {
486                 if (req->rq_send_state != LUSTRE_IMP_FULL ||
487                     imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
488                         RETURN(-ENOTCONN);
489                 }
490
491                 ptlrpc_request_handle_notconn(req);
492
493                 RETURN(rc);
494         }
495
496         if (req->rq_import->imp_replayable) {
497                 spin_lock_irqsave(&imp->imp_lock, flags);
498                 if (req->rq_replay || req->rq_transno != 0)
499                         ptlrpc_retain_replayable_request(req, imp);
500                 else if (req->rq_commit_cb != NULL)
501                         req->rq_commit_cb(req);
502
503                 if (req->rq_transno > imp->imp_max_transno)
504                         imp->imp_max_transno = req->rq_transno;
505
506                 /* Replay-enabled imports return commit-status information. */
507                 if (req->rq_repmsg->last_committed)
508                         imp->imp_peer_committed_transno =
509                                 req->rq_repmsg->last_committed;
510                 ptlrpc_free_committed(imp);
511                 spin_unlock_irqrestore(&imp->imp_lock, flags);
512         }
513
514         RETURN(rc);
515 }
516
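/* Move a RQ_PHASE_NEW request into RQ_PHASE_RPC and send it, unless
 * ptlrpc_import_delay_req() says it must wait (parked on the import's delayed
 * list) or fail outright (pushed straight to RQ_PHASE_INTERPRET). */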
517 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
518 {
519         struct obd_import     *imp;
520         unsigned long          flags;
521         int rc;
522         ENTRY;
523
524         LASSERT(req->rq_phase == RQ_PHASE_NEW);
525         req->rq_phase = RQ_PHASE_RPC;
526
527         imp = req->rq_import;
528         spin_lock_irqsave(&imp->imp_lock, flags);
529
530         req->rq_import_generation = imp->imp_generation;
531
532         if (ptlrpc_import_delay_req(imp, req, &rc)) {
533                 spin_lock (&req->rq_lock);
534                 req->rq_waiting = 1;
535                 spin_unlock (&req->rq_lock);
536
537                 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
538                           "(%s != %s)",
539                           req->rq_reqmsg->status, 
540                           ptlrpc_import_state_name(req->rq_send_state),
541                           ptlrpc_import_state_name(imp->imp_state));
542                 LASSERT(list_empty (&req->rq_list));
543
544                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
545                 spin_unlock_irqrestore(&imp->imp_lock, flags);
546                 RETURN(0);
547         }
548
549         if (rc != 0) {
550                 spin_unlock_irqrestore(&imp->imp_lock, flags);
551                 req->rq_status = rc;
552                 req->rq_phase = RQ_PHASE_INTERPRET;
553                 RETURN(rc);
554         }
555
556         /* XXX this is the same as ptlrpc_queue_wait */
557         LASSERT(list_empty(&req->rq_list));
558         list_add_tail(&req->rq_list, &imp->imp_sending_list);
559         spin_unlock_irqrestore(&imp->imp_lock, flags);
560
561         req->rq_reqmsg->status = current->pid;
562         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
563                " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
564                imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
565                req->rq_xid,
566                imp->imp_connection->c_peer.peer_ni->pni_name,
567                imp->imp_connection->c_peer.peer_nid,
568                req->rq_reqmsg->opc);
569
570         rc = ptl_send_rpc(req);
571         if (rc) {
572                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
573                 req->rq_timeout = 1;
574                 RETURN(rc);
575         }
576         RETURN(0);
577 }
578
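/* The core of the async client: walk every request in the set and advance it
 * through the NEW -> RPC -> BULK -> INTERPRET -> COMPLETE phases, resending or
 * erroring out as the import state dictates.  Returns non-zero once the whole
 * set has completed or a timer recalculation is needed, so it also serves as
 * the wakeup condition in ptlrpc_set_wait(). */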
579 int ptlrpc_check_set(struct ptlrpc_request_set *set)
580 {
581         unsigned long flags;
582         struct list_head *tmp;
583         int force_timer_recalc = 0;
584         ENTRY;
585
586         if (set->set_remaining == 0)
587                 RETURN(1);
588
589         list_for_each(tmp, &set->set_requests) {
590                 struct ptlrpc_request *req =
591                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
592                 struct obd_import *imp = req->rq_import;
593                 int rc = 0;
594
595                 if (req->rq_phase == RQ_PHASE_NEW &&
596                     ptlrpc_send_new_req(req)) {
597                         force_timer_recalc = 1;
598                 }
599
600                 if (!(req->rq_phase == RQ_PHASE_RPC ||
601                       req->rq_phase == RQ_PHASE_BULK ||
602                       req->rq_phase == RQ_PHASE_INTERPRET ||
603                       req->rq_phase == RQ_PHASE_COMPLETE)) {
604                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
605                         LBUG();
606                 }
607
608                 if (req->rq_phase == RQ_PHASE_COMPLETE)
609                         continue;
610
611                 if (req->rq_phase == RQ_PHASE_INTERPRET)
612                         GOTO(interpret, req->rq_status);
613
614                 if (req->rq_err) {
615                         ptlrpc_unregister_reply(req);
616                         if (req->rq_status == 0)
617                                 req->rq_status = -EIO;
618                         req->rq_phase = RQ_PHASE_INTERPRET;
619
620                         spin_lock_irqsave(&imp->imp_lock, flags);
621                         list_del_init(&req->rq_list);
622                         spin_unlock_irqrestore(&imp->imp_lock, flags);
623
624                         GOTO(interpret, req->rq_status);
625                 }
626
627                 /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
628                  * will only be set after rq_timedout, but the oig waiting
629                  * path sets rq_intr irrespective of whether ptlrpcd has
630                  * seen a timeout.  Our policy is to only interpret
631                  * interrupted rpcs after they have timed out */
632                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting)) {
633                         /* NB could be on delayed list */
634                         ptlrpc_unregister_reply(req);
635                         req->rq_status = -EINTR;
636                         req->rq_phase = RQ_PHASE_INTERPRET;
637
638                         spin_lock_irqsave(&imp->imp_lock, flags);
639                         list_del_init(&req->rq_list);
640                         spin_unlock_irqrestore(&imp->imp_lock, flags);
641
642                         GOTO(interpret, req->rq_status);
643                 }
644
645                 if (req->rq_phase == RQ_PHASE_RPC) {
646                         if (req->rq_waiting || req->rq_resend) {
647                                 int status;
648
649                                 LASSERT (!ptlrpc_client_receiving_reply(req));
650                                 LASSERT (req->rq_bulk == NULL ||
651                                          !ptlrpc_bulk_active(req->rq_bulk));
652
653                                 spin_lock_irqsave(&imp->imp_lock, flags);
654
655                                 if (ptlrpc_import_delay_req(imp, req, &status)) {
656                                         spin_unlock_irqrestore(&imp->imp_lock,
657                                                                flags);
658                                         continue;
659                                 } 
660
661                                 list_del_init(&req->rq_list);
662                                 if (status != 0)  {
663                                         req->rq_status = status;
664                                         req->rq_phase = RQ_PHASE_INTERPRET;
665                                         spin_unlock_irqrestore(&imp->imp_lock,
666                                                                flags);
667                                         GOTO(interpret, req->rq_status);
668                                 }
669                                 if (req->rq_no_resend) {
670                                         req->rq_status = -ENOTCONN;
671                                         req->rq_phase = RQ_PHASE_INTERPRET;
672                                         spin_unlock_irqrestore(&imp->imp_lock,
673                                                                flags);
674                                         GOTO(interpret, req->rq_status);
675                                 }
676                                 list_add_tail(&req->rq_list,
677                                               &imp->imp_sending_list);
678
679                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
680
681                                 req->rq_waiting = 0;
682                                 if (req->rq_resend) {
683                                         lustre_msg_add_flags(req->rq_reqmsg,
684                                                              MSG_RESENT);
685                                         ptlrpc_unregister_reply(req);
686                                         if (req->rq_bulk) {
687                                                 __u64 old_xid = req->rq_xid;
688
689                                                 /* ensure previous bulk fails */
690                                                 req->rq_xid = ptlrpc_next_xid();
691                                                 CDEBUG(D_HA, "resend bulk "
692                                                        "old x"LPU64
693                                                        " new x"LPU64"\n",
694                                                        old_xid, req->rq_xid);
695                                         }
696                                 }
697
698                                 rc = ptl_send_rpc(req);
699                                 if (rc) {
700                                         DEBUG_REQ(D_HA, req, "send failed (%d)",
701                                                   rc);
702                                         force_timer_recalc = 1;
703                                         req->rq_timeout = 0;
704                                 }
705                                 /* need to reset the timeout */
706                                 force_timer_recalc = 1;
707                         }
708
709                         /* Still waiting for a reply? */
710                         if (ptlrpc_client_receiving_reply(req))
711                                 continue;
712
713                         /* Did we actually receive a reply? */
714                         if (!ptlrpc_client_replied(req))
715                                 continue;
716
717                         spin_lock_irqsave(&imp->imp_lock, flags);
718                         list_del_init(&req->rq_list);
719                         spin_unlock_irqrestore(&imp->imp_lock, flags);
720
721                         req->rq_status = after_reply(req);
722                         if (req->rq_resend) {
723                                 /* Add this req to the delayed list so
724                                    it can be errored if the import is
725                                    evicted after recovery. */
726                                 spin_lock_irqsave (&req->rq_lock, flags);
727                                 list_add_tail(&req->rq_list, 
728                                               &imp->imp_delayed_list);
729                                 spin_unlock_irqrestore(&req->rq_lock, flags);
730                                 continue;
731                         }
732
733                         /* If there is no bulk associated with this request,
734                          * then we're done and should let the interpreter
735                          * process the reply.  Similarly if the RPC returned
736                          * an error, and therefore the bulk will never arrive.
737                          */
738                         if (req->rq_bulk == NULL || req->rq_status != 0) {
739                                 req->rq_phase = RQ_PHASE_INTERPRET;
740                                 GOTO(interpret, req->rq_status);
741                         }
742
743                         req->rq_phase = RQ_PHASE_BULK;
744                 }
745
746                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
747                 if (ptlrpc_bulk_active(req->rq_bulk))
748                         continue;
749
750                 if (!req->rq_bulk->bd_success) {
751                         /* The RPC reply arrived OK, but the bulk screwed
752          * up!  Dead weird since the server told us the RPC
753                          * was good after getting the REPLY for her GET or
754                          * the ACK for her PUT. */
755                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
756                         LBUG();
757                 }
758
759                 req->rq_phase = RQ_PHASE_INTERPRET;
760
761         interpret:
762                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
763                 LASSERT(!req->rq_receiving_reply);
764
765                 ptlrpc_unregister_reply(req);
766                 if (req->rq_bulk != NULL)
767                         ptlrpc_unregister_bulk (req);
768
769                 req->rq_phase = RQ_PHASE_COMPLETE;
770
771                 if (req->rq_interpret_reply != NULL) {
772                         int (*interpreter)(struct ptlrpc_request *,void *,int) =
773                                 req->rq_interpret_reply;
774                         req->rq_status = interpreter(req, &req->rq_async_args,
775                                                      req->rq_status);
776                 }
777
778                 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
779                        "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
780                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
781                        req->rq_xid,
782                        imp->imp_connection->c_peer.peer_ni->pni_name,
783                        imp->imp_connection->c_peer.peer_nid,
784                        req->rq_reqmsg->opc);
785
786                 set->set_remaining--;
787
788                 atomic_dec(&imp->imp_inflight);
789                 wake_up(&imp->imp_recovery_waitq);
790         }
791
792         /* If we hit an error, we want to recover promptly. */
793         RETURN(set->set_remaining == 0 || force_timer_recalc);
794 }
795
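/* Handle a single timed-out request: mark it timed out, unregister its reply
 * and bulk, then either error it out on the spot (no import, DLM-fake import,
 * non-FULL send state or no_recov) or kick off import recovery.  Returns
 * non-zero if the request was terminated here rather than left for recovery. */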
796 int ptlrpc_expire_one_request(struct ptlrpc_request *req)
797 {
798         unsigned long      flags;
799         struct obd_import *imp = req->rq_import;
800         ENTRY;
801
802         DEBUG_REQ(D_ERROR, req, "timeout");
803
804         spin_lock_irqsave (&req->rq_lock, flags);
805         req->rq_timedout = 1;
806         spin_unlock_irqrestore (&req->rq_lock, flags);
807
808         ptlrpc_unregister_reply (req);
809
810         if (req->rq_bulk != NULL)
811                 ptlrpc_unregister_bulk (req);
812
813         if (imp == NULL) {
814                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
815                 RETURN(1);
816         }
817
818         /* The DLM server doesn't want recovery run on its imports. */
819         if (imp->imp_dlm_fake)
820                 RETURN(1);
821
822         /* If this request is for recovery or other primordial tasks,
823          * then error it out here. */
824         if (req->rq_send_state != LUSTRE_IMP_FULL || 
825             imp->imp_obd->obd_no_recov) {
826                 spin_lock_irqsave (&req->rq_lock, flags);
827                 req->rq_status = -ETIMEDOUT;
828                 req->rq_err = 1;
829                 spin_unlock_irqrestore (&req->rq_lock, flags);
830                 RETURN(1);
831         }
832
833         ptlrpc_fail_import(imp, req->rq_import_generation);
834
835         RETURN(0);
836 }
837
838 int ptlrpc_expired_set(void *data)
839 {
840         struct ptlrpc_request_set *set = data;
841         struct list_head          *tmp;
842         time_t                     now = LTIME_S (CURRENT_TIME);
843         ENTRY;
844
845         LASSERT(set != NULL);
846
847         /* A timeout expired; see which reqs it applies to... */
848         list_for_each (tmp, &set->set_requests) {
849                 struct ptlrpc_request *req =
850                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
851
852                 /* request in-flight? */
853                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting 
854                        && !req->rq_resend) ||
855                       (req->rq_phase == RQ_PHASE_BULK)))
856                         continue;
857
858                 if (req->rq_timedout ||           /* already dealt with */
859                     req->rq_sent + req->rq_timeout > now) /* not expired */
860                         continue;
861
862                 /* deal with this guy */
863                 ptlrpc_expire_one_request (req);
864         }
865
866         /* When waiting for a whole set, we always want to break out of the
867          * sleep so we can recalculate the timeout, or enable interrupts
868          * iff everyone's timed out.
869          */
870         RETURN(1);
871 }
872
873 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
874 {
875         unsigned long flags;
876         spin_lock_irqsave(&req->rq_lock, flags);
877         req->rq_intr = 1;
878         spin_unlock_irqrestore(&req->rq_lock, flags);
879 }
880
881 void ptlrpc_interrupted_set(void *data)
882 {
883         struct ptlrpc_request_set *set = data;
884         struct list_head *tmp;
885
886         LASSERT(set != NULL);
887         CERROR("INTERRUPTED SET %p\n", set);
888
889         list_for_each(tmp, &set->set_requests) {
890                 struct ptlrpc_request *req =
891                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
892
893                 if (req->rq_phase != RQ_PHASE_RPC)
894                         continue;
895
896                 ptlrpc_mark_interrupted(req);
897         }
898 }
899
900 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
901 {
902         struct list_head      *tmp;
903         time_t                 now = LTIME_S(CURRENT_TIME);
904         time_t                 deadline;
905         int                    timeout = 0;
906         struct ptlrpc_request *req;
907         ENTRY;
908
909         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
910
911         list_for_each(tmp, &set->set_requests) {
912                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
913
914                 /* request in-flight? */
915                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
916                       (req->rq_phase == RQ_PHASE_BULK)))
917                         continue;
918
919                 if (req->rq_timedout)   /* already timed out */
920                         continue;
921
922                 deadline = req->rq_sent + req->rq_timeout;
923                 if (deadline <= now)    /* actually expired already */
924                         timeout = 1;    /* ASAP */
925                 else if (timeout == 0 || timeout > deadline - now)
926                         timeout = deadline - now;
927         }
928         RETURN(timeout);
929 }
930                 
931
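/* Send every new request in the set and sleep in l_wait_event() until
 * ptlrpc_check_set() reports completion, re-arming the timeout and interrupt
 * handlers on each pass.  The set interpreter, if one is registered, gets the
 * final status. */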
932 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
933 {
934         struct list_head      *tmp;
935         struct ptlrpc_request *req;
936         struct l_wait_info     lwi;
937         int                    rc, timeout;
938         ENTRY;
939
940         LASSERT(!list_empty(&set->set_requests));
941         list_for_each(tmp, &set->set_requests) {
942                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
943                 if (req->rq_phase == RQ_PHASE_NEW)
944                         (void)ptlrpc_send_new_req(req);
945         }
946
947         do {
948                 timeout = ptlrpc_set_next_timeout(set);
949
950                 /* wait until all complete, interrupted, or an in-flight
951                  * req times out */
952                 CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
953                        set, timeout);
954                 lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
955                                        ptlrpc_expired_set, 
956                                        ptlrpc_interrupted_set, set);
957                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
958
959                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
960
961                 /* -EINTR => all requests have been flagged rq_intr so next
962                  * check completes.
963                  * -ETIMEDOUT => someone timed out.  When all reqs have
964                  * timed out, signals are enabled allowing completion with
965                  * EINTR.
966                  * I don't really care if we go once more round the loop in
967                  * the error cases -eeb. */
968         } while (rc != 0);
969
970         LASSERT(set->set_remaining == 0);
971
972         rc = 0;
973         list_for_each(tmp, &set->set_requests) {
974                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
975
976                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
977                 if (req->rq_status != 0)
978                         rc = req->rq_status;
979         }
980
981         if (set->set_interpret != NULL) {
982                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
983                         set->set_interpret;
984                 rc = interpreter (set, &set->set_args, rc);
985         }
986
987         RETURN(rc);
988 }
989
990 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
991 {
992         ENTRY;
993         if (request == NULL) {
994                 EXIT;
995                 return;
996         }
997
998         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
999         LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
1000         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
1001         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
1002
1003         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
1004          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
1005         if (request->rq_import != NULL) {
1006                 unsigned long flags = 0;
1007                 if (!locked)
1008                         spin_lock_irqsave(&request->rq_import->imp_lock, flags);
1009                 list_del_init(&request->rq_replay_list);
1010                 if (!locked)
1011                         spin_unlock_irqrestore(&request->rq_import->imp_lock,
1012                                                flags);
1013         }
1014         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
1015
1016         if (atomic_read(&request->rq_refcount) != 0) {
1017                 DEBUG_REQ(D_ERROR, request,
1018                           "freeing request with nonzero refcount");
1019                 LBUG();
1020         }
1021
1022         if (request->rq_repmsg != NULL) {
1023                 OBD_FREE(request->rq_repmsg, request->rq_replen);
1024                 request->rq_repmsg = NULL;
1025         }
1026         if (request->rq_reqmsg != NULL) {
1027                 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
1028                 request->rq_reqmsg = NULL;
1029         }
1030         if (request->rq_export != NULL) {
1031                 class_export_put(request->rq_export);
1032                 request->rq_export = NULL;
1033         }
1034         if (request->rq_import != NULL) {
1035                 class_import_put(request->rq_import);
1036                 request->rq_import = NULL;
1037         }
1038         if (request->rq_bulk != NULL)
1039                 ptlrpc_free_bulk(request->rq_bulk);
1040
1041         OBD_FREE(request, sizeof(*request));
1042         EXIT;
1043 }
1044
1045 void ptlrpc_free_req(struct ptlrpc_request *request)
1046 {
1047         __ptlrpc_free_req(request, 0);
1048 }
1049
1050 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
1051 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
1052 {
1053         LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
1054         (void)__ptlrpc_req_finished(request, 1);
1055 }
1056
1057 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
1058 {
1059         ENTRY;
1060         if (request == NULL)
1061                 RETURN(1);
1062
1063         if (request == (void *)(unsigned long)(0x5a5a5a5a5a5a5a5a) ||
1064             request->rq_reqmsg == (void *)(unsigned long)(0x5a5a5a5a5a5a5a5a)) {
1065                 CERROR("dereferencing freed request (bug 575)\n");
1066                 LBUG();
1067                 RETURN(1);
1068         }
1069
1070         DEBUG_REQ(D_INFO, request, "refcount now %u",
1071                   atomic_read(&request->rq_refcount) - 1);
1072
1073         if (atomic_dec_and_test(&request->rq_refcount)) {
1074                 __ptlrpc_free_req(request, locked);
1075                 RETURN(1);
1076         }
1077
1078         RETURN(0);
1079 }
1080
1081 void ptlrpc_req_finished(struct ptlrpc_request *request)
1082 {
1083         __ptlrpc_req_finished(request, 0);
1084 }
1085
1086 /* Disengage the client's reply buffer from the network
1087  * NB does _NOT_ unregister any client-side bulk.
1088  * IDEMPOTENT, but _not_ safe against concurrent callers.
1089  * The request owner (i.e. the thread doing the I/O) must call...
1090  */
1091 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
1092 {
1093         int                rc;
1094         wait_queue_head_t *wq;
1095         struct l_wait_info lwi;
1096
1097         LASSERT(!in_interrupt ());             /* might sleep */
1098
1099         if (!ptlrpc_client_receiving_reply(request))
1100                 return;
1101
1102         rc = PtlMDUnlink (request->rq_reply_md_h);
1103         if (rc == PTL_MD_INVALID) {
1104                 LASSERT (!ptlrpc_client_receiving_reply(request));
1105                 return;
1106         }
1107         
1108         LASSERT (rc == PTL_OK);
1109
1110         if (request->rq_set != NULL)
1111                 wq = &request->rq_set->set_waitq;
1112         else
1113                 wq = &request->rq_reply_waitq;
1114
1115         for (;;) {
1116                 /* Network access will complete in finite time but the HUGE
1117                  * timeout lets us CWARN for visibility of sluggish NALs */
1118                 lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
1119                 rc = l_wait_event (*wq, !ptlrpc_client_receiving_reply(request), &lwi);
1120                 if (rc == 0)
1121                         return;
1122
1123                 LASSERT (rc == -ETIMEDOUT);
1124                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout");
1125         }
1126 }
1127
1128 /* caller must hold imp->imp_lock */
1129 void ptlrpc_free_committed(struct obd_import *imp)
1130 {
1131         struct list_head *tmp, *saved;
1132         struct ptlrpc_request *req;
1133         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1134         ENTRY;
1135
1136         LASSERT(imp != NULL);
1137
1138         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1139
1140         CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
1141                imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1142
1143         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1144                 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1145
1146                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1147                 LASSERT(req != last_req);
1148                 last_req = req;
1149
1150                 if (req->rq_import_generation < imp->imp_generation) {
1151                         DEBUG_REQ(D_HA, req, "freeing request with old gen");
1152                         GOTO(free_req, 0);
1153                 }
1154
1155                 if (req->rq_replay) {
1156                         DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
1157                         continue;
1158                 }
1159
1160                 /* not yet committed */
1161                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1162                         DEBUG_REQ(D_HA, req, "stopping search");
1163                         break;
1164                 }
1165
1166                 DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
1167                           imp->imp_peer_committed_transno);
1168 free_req:
1169                 if (req->rq_commit_cb != NULL)
1170                         req->rq_commit_cb(req);
1171                 list_del_init(&req->rq_replay_list);
1172                 __ptlrpc_req_finished(req, 1);
1173         }
1174
1175         EXIT;
1176         return;
1177 }
1178
1179 void ptlrpc_cleanup_client(struct obd_import *imp)
1180 {
1181         ENTRY;
1182         EXIT;
1183         return;
1184 }
1185
1186 void ptlrpc_resend_req(struct ptlrpc_request *req)
1187 {
1188         unsigned long flags;
1189
1190         DEBUG_REQ(D_HA, req, "going to resend");
1191         req->rq_reqmsg->handle.cookie = 0;
1192         req->rq_status = -EAGAIN;
1193
1194         spin_lock_irqsave (&req->rq_lock, flags);
1195         req->rq_resend = 1;
1196         req->rq_timedout = 0;
1197         if (req->rq_bulk) {
1198                 __u64 old_xid = req->rq_xid;
1199                 
1200                 /* ensure previous bulk fails */
1201                 req->rq_xid = ptlrpc_next_xid();
1202                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
1203                        old_xid, req->rq_xid);
1204         }
1205         ptlrpc_wake_client_req(req);
1206         spin_unlock_irqrestore (&req->rq_lock, flags);
1207 }
1208
1209 /* XXX: this function and rq_status are currently unused */
1210 void ptlrpc_restart_req(struct ptlrpc_request *req)
1211 {
1212         unsigned long flags;
1213
1214         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1215         req->rq_status = -ERESTARTSYS;
1216
1217         spin_lock_irqsave (&req->rq_lock, flags);
1218         req->rq_restart = 1;
1219         req->rq_timedout = 0;
1220         ptlrpc_wake_client_req(req);
1221         spin_unlock_irqrestore (&req->rq_lock, flags);
1222 }
1223
1224 static int expired_request(void *data)
1225 {
1226         struct ptlrpc_request *req = data;
1227         ENTRY;
1228
1229         RETURN(ptlrpc_expire_one_request(req));
1230 }
1231
1232 static void interrupted_request(void *data)
1233 {
1234         unsigned long flags;
1235
1236         struct ptlrpc_request *req = data;
1237         DEBUG_REQ(D_HA, req, "request interrupted");
1238         spin_lock_irqsave (&req->rq_lock, flags);
1239         req->rq_intr = 1;
1240         spin_unlock_irqrestore (&req->rq_lock, flags);
1241 }
1242
1243 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1244 {
1245         ENTRY;
1246         atomic_inc(&req->rq_refcount);
1247         RETURN(req);
1248 }
1249
1250 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1251                                       struct obd_import *imp)
1252 {
1253         struct list_head *tmp;
1254
1255         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1256
1257         /* don't re-add requests that have been replayed */
1258         if (!list_empty(&req->rq_replay_list))
1259                 return;
1260
1261         lustre_msg_add_flags(req->rq_reqmsg,
1262                              MSG_REPLAY);
1263
1264         LASSERT(imp->imp_replayable);
1265         /* Balanced in ptlrpc_free_committed, usually. */
1266         ptlrpc_request_addref(req);
1267         list_for_each_prev(tmp, &imp->imp_replay_list) {
1268                 struct ptlrpc_request *iter =
1269                         list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1270
1271                 /* We may have duplicate transnos if we create and then
1272          * open a file, or for closes retained to match creating
1273                  * opens, so use req->rq_xid as a secondary key.
1274                  * (See bugs 684, 685, and 428.)
1275                  * XXX no longer needed, but all opens need transnos!
1276                  */
1277                 if (iter->rq_transno > req->rq_transno)
1278                         continue;
1279
1280                 if (iter->rq_transno == req->rq_transno) {
1281                         LASSERT(iter->rq_xid != req->rq_xid);
1282                         if (iter->rq_xid > req->rq_xid)
1283                                 continue;
1284                 }
1285
1286                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
1287                 return;
1288         }
1289
1290         list_add_tail(&req->rq_replay_list, &imp->imp_replay_list);
1291 }
1292
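/* Synchronous counterpart of the set machinery: send a single request and
 * block until it is replied to, interrupted or timed out, restarting the send
 * whenever recovery or a resend is required.  Any bulk attached to the request
 * is also waited for before returning. */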
1293 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1294 {
1295         int rc = 0;
1296         int brc;
1297         struct l_wait_info lwi;
1298         struct obd_import *imp = req->rq_import;
1299         unsigned long flags;
1300         int timeout = 0;
1301         ENTRY;
1302
1303         LASSERT(req->rq_set == NULL);
1304         LASSERT(!req->rq_receiving_reply);
1305         atomic_inc(&imp->imp_inflight);
1306
1307         /* for distributed debugging */
1308         req->rq_reqmsg->status = current->pid;
1309         LASSERT(imp->imp_obd != NULL);
1310         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
1311                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1312                imp->imp_obd->obd_uuid.uuid,
1313                req->rq_reqmsg->status, req->rq_xid,
1314                imp->imp_connection->c_peer.peer_ni->pni_name,
1315                imp->imp_connection->c_peer.peer_nid,
1316                req->rq_reqmsg->opc);
1317
1318         /* Mark phase here for a little debug help */
1319         req->rq_phase = RQ_PHASE_RPC;
1320
1321         spin_lock_irqsave(&imp->imp_lock, flags);
1322         req->rq_import_generation = imp->imp_generation;
1323 restart:
1324         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1325                 list_del(&req->rq_list);
1326
1327                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1328                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1329
1330                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
1331                           current->comm, 
1332                           ptlrpc_import_state_name(req->rq_send_state), 
1333                           ptlrpc_import_state_name(imp->imp_state));
1334                 lwi = LWI_INTR(interrupted_request, req);
1335                 rc = l_wait_event(req->rq_reply_waitq,
1336                                   (req->rq_send_state == imp->imp_state ||
1337                                    req->rq_err),
1338                                   &lwi);
1339                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d == 1)",
1340                           current->comm, 
1341                           ptlrpc_import_state_name(imp->imp_state), 
1342                           ptlrpc_import_state_name(req->rq_send_state),
1343                           req->rq_err);
1344
1345                 spin_lock_irqsave(&imp->imp_lock, flags);
1346                 list_del_init(&req->rq_list);
1347
1348                 if (req->rq_err) {
1349                         rc = -EIO;
1350                 } 
1351                 else if (req->rq_intr) {
1352                         rc = -EINTR;
1353                 }
1354                 else if (req->rq_no_resend) {
1355                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1356                         GOTO(out, rc = -ETIMEDOUT);
1357                 }
1358                 else {
1359                         GOTO(restart, rc);
1360                 }
1361         } 
1362
1363         if (rc != 0) {
1364                 list_del_init(&req->rq_list);
1365                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1366                 req->rq_status = rc; // XXX this ok?
1367                 GOTO(out, rc);
1368         }
1369
1370         if (req->rq_resend) {
1371                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
1372
1373                 if (req->rq_bulk != NULL)
1374                         ptlrpc_unregister_bulk (req);
1375
1376                 DEBUG_REQ(D_HA, req, "resending: ");
1377         }
1378
1379         /* XXX this is the same as ptlrpc_set_wait */
1380         LASSERT(list_empty(&req->rq_list));
1381         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1382         spin_unlock_irqrestore(&imp->imp_lock, flags);
1383
1384         rc = ptl_send_rpc(req);
1385         if (rc) {
1386                 DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
1387                 timeout = 1;
1388         } else {
1389                 timeout = MAX(req->rq_timeout * HZ, 1);
1390                 DEBUG_REQ(D_NET, req, "-- sleeping");
1391         }
1392         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
1393                                req);
1394         l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi);
1395         DEBUG_REQ(D_NET, req, "-- done sleeping");
1396
1397         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
1398                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1399                imp->imp_obd->obd_uuid.uuid,
1400                req->rq_reqmsg->status, req->rq_xid,
1401                imp->imp_connection->c_peer.peer_ni->pni_name,
1402                imp->imp_connection->c_peer.peer_nid,
1403                req->rq_reqmsg->opc);
1404
1405         spin_lock_irqsave(&imp->imp_lock, flags);
1406         list_del_init(&req->rq_list);
1407         spin_unlock_irqrestore(&imp->imp_lock, flags);
1408
1409         /* If the reply was received normally, this just grabs the spinlock
1410          * (ensuring the reply callback has returned), sees that
1411          * req->rq_receiving_reply is clear and returns. */
1412         ptlrpc_unregister_reply (req);
1413
1414         if (req->rq_err)
1415                 GOTO(out, rc = -EIO);
1416
1417         /* Resend if we need to, unless we were interrupted. */
1418         if (req->rq_resend && !req->rq_intr) {
1419                 /* ...unless we were specifically told otherwise. */
1420                 if (req->rq_no_resend)
1421                         GOTO(out, rc = -ETIMEDOUT);
1422                 spin_lock_irqsave(&imp->imp_lock, flags);
1423                 goto restart;
1424         }
1425
1426         if (req->rq_intr) {
1427                 /* Should only be interrupted if we timed out. */
1428                 if (!req->rq_timedout)
1429                         DEBUG_REQ(D_ERROR, req,
1430                                   "rq_intr set but rq_timedout not");
1431                 GOTO(out, rc = -EINTR);
1432         }
1433
1434         if (req->rq_timedout) {                 /* non-recoverable timeout */
1435                 GOTO(out, rc = -ETIMEDOUT);
1436         }
1437
1438         if (!req->rq_replied) {
1439                 /* How can this be? -eeb */
1440                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
1441                 LBUG();
1442                 GOTO(out, rc = req->rq_status);
1443         }
1444
1445         rc = after_reply (req);
1446         /* NB may return +ve success rc */
1447         if (req->rq_resend) {
1448                 spin_lock_irqsave(&imp->imp_lock, flags);
1449                 goto restart;
1450         }
1451
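        /* Common exit path: wait for any outstanding bulk to complete (or
         * unregister it on error), then move the request to the interpret
         * phase and wake anyone waiting on import recovery. */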
1452  out:
1453         if (req->rq_bulk != NULL) {
1454                 if (rc >= 0) {
1455                         /* success so far.  Note that anything going wrong
1456                          * with bulk now is EXTREMELY strange, since the
1457                          * server must have believed that the bulk
1458                          * transferred OK before she replied with success to
1459                          * me. */
1460                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
1461                         brc = l_wait_event(req->rq_reply_waitq,
1462                                            !ptlrpc_bulk_active(req->rq_bulk),
1463                                            &lwi);
1464                         LASSERT(brc == 0 || brc == -ETIMEDOUT);
1465                         if (brc != 0) {
1466                                 LASSERT(brc == -ETIMEDOUT);
1467                                 DEBUG_REQ(D_ERROR, req, "bulk timed out");
1468                                 rc = brc;
1469                         } else if (!req->rq_bulk->bd_success) {
1470                                 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1471                                 rc = -EIO;
1472                         }
1473                 }
1474                 if (rc < 0)
1475                         ptlrpc_unregister_bulk (req);
1476         }
1477
1478         LASSERT(!req->rq_receiving_reply);
1479         req->rq_phase = RQ_PHASE_INTERPRET;
1480
1481         atomic_dec(&imp->imp_inflight);
1482         wake_up(&imp->imp_recovery_waitq);
1483         RETURN(rc);
1484 }
1485
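/* Request state stashed across a replay so that it can be restored once
 * the reply has been interpreted; see ptlrpc_replay_req() and
 * ptlrpc_replay_interpret(). */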
1486 struct ptlrpc_replay_async_args {
1487         int praa_old_state;
1488         int praa_old_status;
1489 };
1490
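/* Reply-interpret callback run by ptlrpcd when a replayed request
 * completes: unpack and sanity-check the reply, restore the saved reply
 * status, record the replayed transno, and advance the import recovery
 * state machine.  Any failure here restarts recovery via
 * ptlrpc_connect_import(). */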
1491 static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
1492                                     void * data, int rc)
1493 {
1494         struct ptlrpc_replay_async_args *aa = data;
1495         struct obd_import *imp = req->rq_import;
1496         unsigned long flags;
1497
1498         atomic_dec(&imp->imp_replay_inflight);
1499
1500         if (!req->rq_replied) {
1501                 CERROR("request replay timed out, restarting recovery\n");
1502                 GOTO(out, rc = -ETIMEDOUT);
1503         }
1504
1505 #if SWAB_PARANOIA
1506         /* Clear reply swab mask; this is a new reply in sender's byte order */
1507         req->rq_rep_swab_mask = 0;
1508 #endif
1509         LASSERT (req->rq_nob_received <= req->rq_replen);
1510         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
1511         if (rc) {
1512                 CERROR("unpack_rep failed: %d\n", rc);
1513                 GOTO(out, rc = -EPROTO);
1514         }
1515
1516         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR &&
1517             req->rq_repmsg->status == -ENOTCONN)
1518                 GOTO(out, rc = req->rq_repmsg->status);
1519
1520         /* The transno had better not change over replay. */
1521         LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);
1522
1523         DEBUG_REQ(D_HA, req, "got rep");
1524
1525         /* let the callback do fixups, possibly modifying the request itself */
1526         if (req->rq_replay_cb)
1527                 req->rq_replay_cb(req);
1528
1529         if (req->rq_replied && req->rq_repmsg->status != aa->praa_old_status) {
1530                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
1531                           req->rq_repmsg->status, aa->praa_old_status);
1532         } else {
1533                 /* Put it back for re-replay. */
1534                 req->rq_repmsg->status = aa->praa_old_status;
1535         }
1536
1537         spin_lock_irqsave(&imp->imp_lock, flags);
1538         imp->imp_last_replay_transno = req->rq_transno;
1539         spin_unlock_irqrestore(&imp->imp_lock, flags);
1540
1541         /* continue with recovery */
1542         rc = ptlrpc_import_recovery_state_machine(imp);
1543  out:
1544         req->rq_send_state = aa->praa_old_state;
1545
1546         if (rc != 0)
1547                 /* this replay failed, so restart recovery */
1548                 ptlrpc_connect_import(imp, NULL);
1549
1550         RETURN(rc);
1551 }
1552
1553
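/* Hand a request to ptlrpcd for replay: reset it to RQ_PHASE_NEW, stash
 * its current send state and reply status in rq_async_args, and let
 * ptlrpc_replay_interpret() restore them when the reply comes back. */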
1554 int ptlrpc_replay_req(struct ptlrpc_request *req)
1555 {
1556         struct ptlrpc_replay_async_args *aa;
1557         ENTRY;
1558
1559         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
1560
1561         /* Not handling automatic bulk replay yet (or ever?) */
1562         LASSERT(req->rq_bulk == NULL);
1563
1564         DEBUG_REQ(D_HA, req, "REPLAY");
1565
1566         LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
1567         aa = (struct ptlrpc_replay_async_args *)&req->rq_async_args;
1568         memset(aa, 0, sizeof *aa);
1569
1570         /* Prepare request to be resent with ptlrpcd */
1571         aa->praa_old_state = req->rq_send_state;
1572         req->rq_send_state = LUSTRE_IMP_REPLAY;
1573         req->rq_phase = RQ_PHASE_NEW;
1574         /*
1575          * Q: "How can a req get on the replay list if it wasn't replied?"
1576          * A: "If we failed during the replay of this request, it will still
1577          *     be on the list, but rq_replied will have been reset to 0."
1578          */
1579         if (req->rq_replied) {
1580                 aa->praa_old_status = req->rq_repmsg->status;
1581                 req->rq_status = 0;
1582                 req->rq_replied = 0;
1583         }
1584
1585         req->rq_interpret_reply = ptlrpc_replay_interpret;
1586         atomic_inc(&req->rq_import->imp_replay_inflight);
1587         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
1588
1589         ptlrpcd_add_req(req);
1590         RETURN(0);
1591 }
1592
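/* Fail every request on the sending and delayed lists that belongs to an
 * older import generation: rq_err is set and the waiter is woken so it can
 * unwind.  Requests from the current generation are left untouched. */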
1593 void ptlrpc_abort_inflight(struct obd_import *imp)
1594 {
1595         unsigned long flags;
1596         struct list_head *tmp, *n;
1597         ENTRY;
1598
1599         /* Make sure that no new requests get processed for this import.
1600          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
1601          * this flag and then putting requests on sending_list or delayed_list.
1602          */
1603         spin_lock_irqsave(&imp->imp_lock, flags);
1604
1605         /* XXX locking?  Maybe we should remove each request with the list
1606          * locked?  Also, how do we know if the requests on the list are
1607          * being freed at this time?
1608          */
1609         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
1610                 struct ptlrpc_request *req =
1611                         list_entry(tmp, struct ptlrpc_request, rq_list);
1612
1613                 DEBUG_REQ(D_HA, req, "inflight");
1614
1615                 spin_lock (&req->rq_lock);
1616                 if (req->rq_import_generation < imp->imp_generation) {
1617                         req->rq_err = 1;
1618                         ptlrpc_wake_client_req(req);
1619                 }
1620                 spin_unlock (&req->rq_lock);
1621         }
1622
1623         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
1624                 struct ptlrpc_request *req =
1625                         list_entry(tmp, struct ptlrpc_request, rq_list);
1626
1627                 DEBUG_REQ(D_HA, req, "aborting waiting req");
1628
1629                 spin_lock (&req->rq_lock);
1630                 if (req->rq_import_generation < imp->imp_generation) {
1631                         req->rq_err = 1;
1632                         ptlrpc_wake_client_req(req);
1633                 }
1634                 spin_unlock (&req->rq_lock);
1635         }
1636
1637         /* Last chance to free reqs left on the replay list, but we
1638          * will still leak reqs that haven't committed.  */
1639         if (imp->imp_replayable)
1640                 ptlrpc_free_committed(imp);
1641
1642         spin_unlock_irqrestore(&imp->imp_lock, flags);
1643
1644         EXIT;
1645 }
1646
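/* XID generation: a process-wide 64-bit counter under its own spinlock.
 * ptlrpc_next_xid() hands out unique, monotonically increasing request
 * identifiers. */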
1647 static __u64 ptlrpc_last_xid = 0;
1648 static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;
1649
1650 __u64 ptlrpc_next_xid(void)
1651 {
1652         __u64 tmp;
1653         spin_lock(&ptlrpc_last_xid_lock);
1654         tmp = ++ptlrpc_last_xid;
1655         spin_unlock(&ptlrpc_last_xid_lock);
1656         return tmp;
1657 }
1658
1659