Land b_smallfix onto HEAD (20040223_1817)
[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <errno.h>
26 #include <signal.h>
27 #include <liblustre.h>
28 #endif
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/lustre_lib.h>
33 #include <linux/lustre_ha.h>
34 #include <linux/lustre_import.h>
35
36 #include "ptlrpc_internal.h"
37
38 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
39                         struct ptlrpc_client *cl)
40 {
41         cl->cli_request_portal = req_portal;
42         cl->cli_reply_portal   = rep_portal;
43         cl->cli_name           = name;
44 }
45
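/* Resolve a server UUID to a ptlrpc connection: map the UUID to a portals
 * peer, then look the connection up via ptlrpc_get_connection() and record
 * the remote UUID on it.  Returns NULL if the peer cannot be found. */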
46 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
47 {
48         struct ptlrpc_connection *c;
49         struct ptlrpc_peer peer;
50         int err;
51
52         err = ptlrpc_uuid_to_peer(uuid, &peer);
53         if (err != 0) {
54                 CERROR("cannot find peer %s!\n", uuid->uuid);
55                 return NULL;
56         }
57
58         c = ptlrpc_get_connection(&peer, uuid);
59         if (c) {
60                 memcpy(c->c_remote_uuid.uuid,
61                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
62         }
63
64         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
65
66         return c;
67 }
68
69 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
70                                  struct obd_uuid *uuid)
71 {
72         struct ptlrpc_peer peer;
73         int err;
74
75         err = ptlrpc_uuid_to_peer(uuid, &peer);
76         if (err != 0) {
77                 CERROR("cannot find peer %s!\n", uuid->uuid);
78                 return;
79         }
80
81         memcpy(&conn->c_peer, &peer, sizeof (peer));
82         return;
83 }
84
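/* Allocate a bulk descriptor with room for 'npages' page fragments; the
 * actual pages are attached later with ptlrpc_prep_bulk_page(). */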
85 static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
86 {
87         struct ptlrpc_bulk_desc *desc;
88
89         OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages]));
90         if (!desc)
91                 return NULL;
92
93         spin_lock_init(&desc->bd_lock);
94         init_waitqueue_head(&desc->bd_waitq);
95         desc->bd_max_pages = npages;
96         desc->bd_page_count = 0;
97         desc->bd_md_h = PTL_HANDLE_NONE;
98         desc->bd_portal = portal;
99         desc->bd_type = type;
100         
101         return desc;
102 }
103
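/* Client-side bulk setup: attach a new bulk descriptor to an outgoing
 * request.  The descriptor takes a reference on the import and is owned
 * by the request via req->rq_bulk. */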
104 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
105                                                int npages, int type, int portal)
106 {
107         struct obd_import *imp = req->rq_import;
108         struct ptlrpc_bulk_desc *desc;
109
110         LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
111         desc = new_bulk(npages, type, portal);
112         if (desc == NULL)
113                 RETURN(NULL);
114
115         desc->bd_import_generation = req->rq_import_generation;
116         desc->bd_import = class_import_get(imp);
117         desc->bd_req = req;
118
119         desc->bd_cbid.cbid_fn  = client_bulk_callback;
120         desc->bd_cbid.cbid_arg = desc;
121
122         /* This makes req own desc, and free it when she frees herself */
123         req->rq_bulk = desc;
124
125         return desc;
126 }
127
128 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
129                                                int npages, int type, int portal)
130 {
131         struct obd_export *exp = req->rq_export;
132         struct ptlrpc_bulk_desc *desc;
133
134         LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
135
136         desc = new_bulk(npages, type, portal);
137         if (desc == NULL)
138                 RETURN(NULL);
139
140         desc->bd_export = class_export_get(exp);
141         desc->bd_req = req;
142
143         desc->bd_cbid.cbid_fn  = server_bulk_callback;
144         desc->bd_cbid.cbid_arg = desc;
145
146         /* NB we don't assign rq_bulk here; server-side requests are
147          * re-used, and the handler frees the bulk desc explicitly. */
148
149         return desc;
150 }
151
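/* Append one page fragment to a bulk descriptor: a kiov entry in the
 * kernel, a plain iovec under liblustre.  The fragment must lie within a
 * single page. */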
152 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
153                            struct page *page, int pageoffset, int len)
154 {
155 #ifdef __KERNEL__
156         ptl_kiov_t *kiov = &desc->bd_iov[desc->bd_page_count];
157 #else
158         struct iovec *iov = &desc->bd_iov[desc->bd_page_count];
159 #endif
160         LASSERT(desc->bd_page_count < desc->bd_max_pages);
161         LASSERT(page != NULL);
162         LASSERT(pageoffset >= 0);
163         LASSERT(len > 0);
164         LASSERT(pageoffset + len <= PAGE_SIZE);
165
166 #ifdef __KERNEL__
167         kiov->kiov_page   = page;
168         kiov->kiov_offset = pageoffset;
169         kiov->kiov_len    = len;
170 #else
171         iov->iov_base = page->addr + pageoffset;
172         iov->iov_len  = len;
173 #endif
174         desc->bd_page_count++;
175         desc->bd_nob += len;
176 }
177
178 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
179 {
180         ENTRY;
181
182         LASSERT(desc != NULL);
183         LASSERT(desc->bd_page_count != 0x5a5a5a5a); /* not freed already */
184         LASSERT(!desc->bd_network_rw);         /* network must have handed it off */
185         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
186         if (desc->bd_export)
187                 class_export_put(desc->bd_export);
188         else
189                 class_import_put(desc->bd_import);
190
191         OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, 
192                                 bd_iov[desc->bd_max_pages]));
193         EXIT;
194 }
195
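/* Allocate and initialise a request for import 'imp': pack the request
 * message buffers, set the default timeout and portals, assign a fresh xid
 * and take an import reference.  Returns NULL on allocation or packing
 * failure. */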
196 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
197                                        int count, int *lengths, char **bufs)
198 {
199         struct ptlrpc_request *request;
200         int rc;
201         ENTRY;
202
203         LASSERT((unsigned long)imp > 0x1000);
204
205         OBD_ALLOC(request, sizeof(*request));
206         if (!request) {
207                 CERROR("request allocation out of memory\n");
208                 RETURN(NULL);
209         }
210
211         rc = lustre_pack_request(request, count, lengths, bufs);
212         if (rc) {
213                 CERROR("cannot pack request %d\n", rc);
214                 OBD_FREE(request, sizeof(*request));
215                 RETURN(NULL);
216         }
217
218         if (imp->imp_server_timeout)
219                 request->rq_timeout = obd_timeout / 2;
220         else
221                 request->rq_timeout = obd_timeout;
222         request->rq_send_state = LUSTRE_IMP_FULL;
223         request->rq_type = PTL_RPC_MSG_REQUEST;
224         request->rq_import = class_import_get(imp);
225
226         request->rq_req_cbid.cbid_fn  = request_out_callback;
227         request->rq_req_cbid.cbid_arg = request;
228
229         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
230         request->rq_reply_cbid.cbid_arg = request;
231         
232         request->rq_phase = RQ_PHASE_NEW;
233
234         /* XXX FIXME bug 249 */
235         request->rq_request_portal = imp->imp_client->cli_request_portal;
236         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
237
238         spin_lock_init(&request->rq_lock);
239         INIT_LIST_HEAD(&request->rq_list);
240         INIT_LIST_HEAD(&request->rq_replay_list);
241         INIT_LIST_HEAD(&request->rq_set_chain);
242         init_waitqueue_head(&request->rq_reply_waitq);
243         request->rq_xid = ptlrpc_next_xid();
244         atomic_set(&request->rq_refcount, 1);
245
246         request->rq_reqmsg->opc = opcode;
247         request->rq_reqmsg->flags = 0;
248
249         RETURN(request);
250 }
251
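/* Allocate an empty request set; requests join it later through
 * ptlrpc_set_add_req() or ptlrpc_set_add_new_req(). */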
252 struct ptlrpc_request_set *ptlrpc_prep_set(void)
253 {
254         struct ptlrpc_request_set *set;
255
256         OBD_ALLOC(set, sizeof *set);
257         if (!set)
258                 RETURN(NULL);
259         INIT_LIST_HEAD(&set->set_requests);
260         init_waitqueue_head(&set->set_waitq);
261         set->set_remaining = 0;
262         spin_lock_init(&set->set_new_req_lock);
263         INIT_LIST_HEAD(&set->set_new_requests);
264
265         RETURN(set);
266 }
267
268 /* Finish with this set; opposite of prep_set. */
269 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
270 {
271         struct list_head *tmp;
272         struct list_head *next;
273         int               expected_phase;
274         int               n = 0;
275         ENTRY;
276
277         /* Requests on the set should either all be completed, or all be new */
278         expected_phase = (set->set_remaining == 0) ?
279                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
280         list_for_each (tmp, &set->set_requests) {
281                 struct ptlrpc_request *req =
282                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
283
284                 LASSERT(req->rq_phase == expected_phase);
285                 n++;
286         }
287
288         LASSERT(set->set_remaining == 0 || set->set_remaining == n);
289
290         list_for_each_safe(tmp, next, &set->set_requests) {
291                 struct ptlrpc_request *req =
292                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
293                 list_del_init(&req->rq_set_chain);
294
295                 LASSERT(req->rq_phase == expected_phase);
296
297                 if (req->rq_phase == RQ_PHASE_NEW) {
298
299                         if (req->rq_interpret_reply != NULL) {
300                                 int (*interpreter)(struct ptlrpc_request *,
301                                                    void *, int) =
302                                         req->rq_interpret_reply;
303
304                                 /* higher level (i.e. LOV) failed;
305                                  * let the sub reqs clean up */
306                                 req->rq_status = -EBADR;
307                                 interpreter(req, &req->rq_async_args,
308                                             req->rq_status);
309                         }
310                         set->set_remaining--;
311                 }
312
313                 req->rq_set = NULL;
314                 ptlrpc_req_finished (req);
315         }
316
317         LASSERT(set->set_remaining == 0);
318
319         OBD_FREE(set, sizeof(*set));
320         EXIT;
321 }
322
323 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
324                         struct ptlrpc_request *req)
325 {
326         /* The set takes over the caller's request reference */
327         list_add_tail(&req->rq_set_chain, &set->set_requests);
328         req->rq_set = set;
329         set->set_remaining++;
330 }
331
332 /* Locked so that many callers can add things; the context that owns the set
333  * is supposed to notice these and move them into the set proper. */
334 void ptlrpc_set_add_new_req(struct ptlrpc_request_set *set,
335                             struct ptlrpc_request *req)
336 {
337         unsigned long flags;
338         spin_lock_irqsave(&set->set_new_req_lock, flags);
339         /* The set takes over the caller's request reference */
340         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
341         req->rq_set = set;
342         spin_unlock_irqrestore(&set->set_new_req_lock, flags);
343 }
344
345 /*
346  * Based on the current state of the import, determine if the request
347  * can be sent, is an error, or should be delayed.
348  *
349  * Returns true if this request should be delayed. If false and
350  * *status is non-zero, then the request cannot be sent and *status is the
351  * error code.  If false and *status is 0, then the request can be sent.
352  *
353  * The imp->imp_lock must be held.
354  */
355 static int ptlrpc_import_delay_req(struct obd_import *imp, 
356                                    struct ptlrpc_request *req, int *status)
357 {
358         int delay = 0;
359         ENTRY;
360
361         LASSERT (status != NULL);
362         *status = 0;
363
364         if (imp->imp_state == LUSTRE_IMP_NEW) {
365                 DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
366                 *status = -EIO;
367                 LBUG();
368         }
369         else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
370                 DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
371                 *status = -EIO;
372         }
373         /*
374          * If the import has been invalidated (such as by an OST failure), the
375          * request must fail with -EIO.  
376          */
377         else if (imp->imp_invalid) {
378                 DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
379                 *status = -EIO;
380         } 
381         else if (req->rq_import_generation != imp->imp_generation) {
382                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
383                 *status = -EIO;
384         } 
385         else if (req->rq_send_state != imp->imp_state) {
386                 if (imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) 
387                         *status = -EWOULDBLOCK;
388                 else
389                         delay = 1;
390         }
391
392         RETURN(delay);
393 }
394
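/* Poll a request's state under rq_lock: returns non-zero once it has been
 * replied to, errored, or flagged for resend/restart.  Used as the wait
 * condition in ptlrpc_queue_wait(). */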
395 static int ptlrpc_check_reply(struct ptlrpc_request *req)
396 {
397         unsigned long flags;
398         int rc = 0;
399         ENTRY;
400
401         /* serialise with network callback */
402         spin_lock_irqsave (&req->rq_lock, flags);
403
404         if (req->rq_replied) {
405                 DEBUG_REQ(D_NET, req, "REPLIED:");
406                 GOTO(out, rc = 1);
407         }
408
409         if (req->rq_err) {
410                 DEBUG_REQ(D_ERROR, req, "ABORTED:");
411                 GOTO(out, rc = 1);
412         }
413
414         if (req->rq_resend) {
415                 DEBUG_REQ(D_ERROR, req, "RESEND:");
416                 GOTO(out, rc = 1);
417         }
418
419         if (req->rq_restart) {
420                 DEBUG_REQ(D_ERROR, req, "RESTART:");
421                 GOTO(out, rc = 1);
422         }
423         EXIT;
424  out:
425         spin_unlock_irqrestore (&req->rq_lock, flags);
426         DEBUG_REQ(D_NET, req, "rc = %d for", rc);
427         return rc;
428 }
429
430 static int ptlrpc_check_status(struct ptlrpc_request *req)
431 {
432         int err;
433         ENTRY;
434
435         err = req->rq_repmsg->status;
436         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
437                 DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR, err == %d", 
438                           err);
439                 RETURN(err < 0 ? err : -EINVAL);
440         }
441
442         if (err < 0) {
443                 DEBUG_REQ(D_INFO, req, "status is %d", err);
444         } else if (err > 0) {
445                 /* XXX: translate this error from net to host */
446                 DEBUG_REQ(D_INFO, req, "status is %d", err);
447         }
448
449         RETURN(err);
450 }
451
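/* Post-reply processing: unpack and sanity-check the reply (which arrives
 * in the sender's byte order), record the transno for replay, handle
 * -ENOTCONN, and update the import's committed-transno bookkeeping for
 * replayable imports. */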
452 static int after_reply(struct ptlrpc_request *req)
453 {
454         unsigned long flags;
455         struct obd_import *imp = req->rq_import;
456         int rc;
457         ENTRY;
458
459         LASSERT(!req->rq_receiving_reply);
460
461         /* NB Until this point, the whole of the incoming message,
462          * including buflens, status etc is in the sender's byte order. */
463
464 #if SWAB_PARANOIA
465         /* Clear reply swab mask; this is a new reply in sender's byte order */
466         req->rq_rep_swab_mask = 0;
467 #endif
468         LASSERT (req->rq_nob_received <= req->rq_replen);
469         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
470         if (rc) {
471                 CERROR("unpack_rep failed: %d\n", rc);
472                 RETURN(-EPROTO);
473         }
474
475         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
476             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
477                 CERROR("invalid packet type received (type=%u)\n",
478                        req->rq_repmsg->type);
479                 RETURN(-EPROTO);
480         }
481
482         /* Store transno in reqmsg for replay. */
483         req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;
484
485         rc = ptlrpc_check_status(req);
486
487         /* Either we've been evicted, or the server has failed for
488          * some reason. Try to reconnect, and if that fails, punt to the
489          * upcall. */
490         if (rc == -ENOTCONN) {
491                 if (req->rq_send_state != LUSTRE_IMP_FULL ||
492                     imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
493                         RETURN(-ENOTCONN);
494                 }
495
496                 ptlrpc_request_handle_notconn(req);
497
498                 RETURN(rc);
499         }
500
501         if (req->rq_import->imp_replayable) {
502                 spin_lock_irqsave(&imp->imp_lock, flags);
503                 if (req->rq_replay || req->rq_transno != 0)
504                         ptlrpc_retain_replayable_request(req, imp);
505                 else if (req->rq_commit_cb != NULL)
506                         req->rq_commit_cb(req);
507
508                 if (req->rq_transno > imp->imp_max_transno)
509                         imp->imp_max_transno = req->rq_transno;
510
511                 /* Replay-enabled imports return commit-status information. */
512                 if (req->rq_repmsg->last_committed)
513                         imp->imp_peer_committed_transno =
514                                 req->rq_repmsg->last_committed;
515                 ptlrpc_free_committed(imp);
516                 spin_unlock_irqrestore(&imp->imp_lock, flags);
517         }
518
519         RETURN(rc);
520 }
521
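/* Move a request from RQ_PHASE_NEW to RQ_PHASE_RPC and send it, unless the
 * import is invalid or ptlrpc_import_delay_req() says it must wait, in
 * which case it is parked on the import's delayed list. */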
522 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
523 {
524         struct obd_import     *imp;
525         unsigned long          flags;
526         int rc;
527         ENTRY;
528
529         LASSERT(req->rq_phase == RQ_PHASE_NEW);
530         req->rq_phase = RQ_PHASE_RPC;
531
532         imp = req->rq_import;
533         spin_lock_irqsave(&imp->imp_lock, flags);
534
535         if (imp->imp_invalid) {
536                 spin_unlock_irqrestore(&imp->imp_lock, flags);
537                 req->rq_status = -EIO;
538                 req->rq_phase = RQ_PHASE_INTERPRET;
539                 RETURN(-EIO);
540         }
541
542         req->rq_import_generation = imp->imp_generation;
543
544         if (ptlrpc_import_delay_req(imp, req, &rc)) {
545                 spin_lock (&req->rq_lock);
546                 req->rq_waiting = 1;
547                 spin_unlock (&req->rq_lock);
548
549                 LASSERT(list_empty (&req->rq_list));
550
551                 // list_del(&req->rq_list);
552                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
553                 spin_unlock_irqrestore(&imp->imp_lock, flags);
554                 RETURN(0);
555         }
556
557         if (rc != 0) {
558                 spin_unlock_irqrestore(&imp->imp_lock, flags);
559                 req->rq_status = rc;
560                 req->rq_phase = RQ_PHASE_INTERPRET;
561                 RETURN(rc);
562         }
563
564         /* XXX this is the same as ptlrpc_queue_wait */
565         LASSERT(list_empty(&req->rq_list));
566         list_add_tail(&req->rq_list, &imp->imp_sending_list);
567         spin_unlock_irqrestore(&imp->imp_lock, flags);
568
569         req->rq_reqmsg->status = current->pid;
570         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
571                " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
572                imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
573                req->rq_xid,
574                imp->imp_connection->c_peer.peer_ni->pni_name,
575                imp->imp_connection->c_peer.peer_nid,
576                req->rq_reqmsg->opc);
577
578         rc = ptl_send_rpc(req);
579         if (rc) {
580                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
581                 req->rq_timeout = 1;
582                 RETURN(rc);
583         }
584         RETURN(0);
585 }
586
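/* Drive each request in the set through its state machine (NEW -> RPC ->
 * BULK -> INTERPRET -> COMPLETE), handling errors, interrupts and resends
 * along the way.  Returns non-zero when the set is complete or the caller
 * should recalculate its timeout. */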
587 int ptlrpc_check_set(struct ptlrpc_request_set *set)
588 {
589         unsigned long flags;
590         struct list_head *tmp;
591         int force_timer_recalc = 0;
592         ENTRY;
593
594         if (set->set_remaining == 0)
595                 RETURN(1);
596
597         list_for_each(tmp, &set->set_requests) {
598                 struct ptlrpc_request *req =
599                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
600                 struct obd_import *imp = req->rq_import;
601                 int rc = 0;
602
603                 if (req->rq_phase == RQ_PHASE_NEW &&
604                     ptlrpc_send_new_req(req)) {
605                         force_timer_recalc = 1;
606                 }
607
608                 if (!(req->rq_phase == RQ_PHASE_RPC ||
609                       req->rq_phase == RQ_PHASE_BULK ||
610                       req->rq_phase == RQ_PHASE_INTERPRET ||
611                       req->rq_phase == RQ_PHASE_COMPLETE)) {
612                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
613                         LBUG();
614                 }
615
616                 if (req->rq_phase == RQ_PHASE_COMPLETE)
617                         continue;
618
619                 if (req->rq_phase == RQ_PHASE_INTERPRET)
620                         GOTO(interpret, req->rq_status);
621
622                 if (req->rq_err) {
623                         ptlrpc_unregister_reply(req);
624                         if (req->rq_status == 0)
625                                 req->rq_status = -EIO;
626                         req->rq_phase = RQ_PHASE_INTERPRET;
627
628                         spin_lock_irqsave(&imp->imp_lock, flags);
629                         list_del_init(&req->rq_list);
630                         spin_unlock_irqrestore(&imp->imp_lock, flags);
631
632                         GOTO(interpret, req->rq_status);
633                 }
634
635                 /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
636                  * will only be set after rq_timedout, but the oig waiting
637                  * path sets rq_intr irrespective of whether ptlrpcd has
638                  * seen a timeout.  our policy is to only interpret 
639                  * interrupted rpcs after they have timed out */
640                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting)) {
641                         /* NB could be on delayed list */
642                         ptlrpc_unregister_reply(req);
643                         req->rq_status = -EINTR;
644                         req->rq_phase = RQ_PHASE_INTERPRET;
645
646                         spin_lock_irqsave(&imp->imp_lock, flags);
647                         list_del_init(&req->rq_list);
648                         spin_unlock_irqrestore(&imp->imp_lock, flags);
649
650                         GOTO(interpret, req->rq_status);
651                 }
652
653                 if (req->rq_phase == RQ_PHASE_RPC) {
654                         if (req->rq_waiting || req->rq_resend) {
655                                 int status;
656
657                                 LASSERT (!ptlrpc_client_receiving_reply(req));
658                                 LASSERT (req->rq_bulk == NULL ||
659                                          !ptlrpc_bulk_active(req->rq_bulk));
660
661                                 spin_lock_irqsave(&imp->imp_lock, flags);
662
663                                 if (ptlrpc_import_delay_req(imp, req, &status)) {
664                                         spin_unlock_irqrestore(&imp->imp_lock,
665                                                                flags);
666                                         continue;
667                                 } 
668
669                                 list_del_init(&req->rq_list);
670                                 if (status != 0)  {
671                                         req->rq_status = status;
672                                         req->rq_phase = RQ_PHASE_INTERPRET;
673                                         spin_unlock_irqrestore(&imp->imp_lock,
674                                                                flags);
675                                         GOTO(interpret, req->rq_status);
676                                 }
677                                 if (req->rq_no_resend) {
678                                         req->rq_status = -ENOTCONN;
679                                         req->rq_phase = RQ_PHASE_INTERPRET;
680                                         spin_unlock_irqrestore(&imp->imp_lock,
681                                                                flags);
682                                         GOTO(interpret, req->rq_status);
683                                 }
684                                 list_add_tail(&req->rq_list,
685                                               &imp->imp_sending_list);
686
687                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
688
689                                 req->rq_waiting = 0;
690                                 if (req->rq_resend) {
691                                         lustre_msg_add_flags(req->rq_reqmsg,
692                                                              MSG_RESENT);
693                                         ptlrpc_unregister_reply(req);
694                                         if (req->rq_bulk) {
695                                                 __u64 old_xid = req->rq_xid;
696
697                                                 /* ensure previous bulk fails */
698                                                 req->rq_xid = ptlrpc_next_xid();
699                                                 CDEBUG(D_HA, "resend bulk "
700                                                        "old x"LPU64
701                                                        " new x"LPU64"\n",
702                                                        old_xid, req->rq_xid);
703                                         }
704                                 }
705
706                                 rc = ptl_send_rpc(req);
707                                 if (rc) {
708                                         DEBUG_REQ(D_HA, req, "send failed (%d)",
709                                                   rc);
710                                         force_timer_recalc = 1;
711                                         req->rq_timeout = 0;
712                                 }
713                                 /* need to reset the timeout */
714                                 force_timer_recalc = 1;
715                         }
716
717                         /* Still waiting for a reply? */
718                         if (ptlrpc_client_receiving_reply(req))
719                                 continue;
720
721                         /* Did we actually receive a reply? */
722                         if (!ptlrpc_client_replied(req))
723                                 continue;
724
725                         spin_lock_irqsave(&imp->imp_lock, flags);
726                         list_del_init(&req->rq_list);
727                         spin_unlock_irqrestore(&imp->imp_lock, flags);
728
729                         req->rq_status = after_reply(req);
730                         if (req->rq_resend) {
731                                 /* Add this req to the delayed list so
732                                    it can be errored if the import is
733                                    evicted after recovery. */
734                                 spin_lock_irqsave (&req->rq_lock, flags);
735                                 list_add_tail(&req->rq_list, 
736                                               &imp->imp_delayed_list);
737                                 spin_unlock_irqrestore(&req->rq_lock, flags);
738                                 continue;
739                         }
740
741                         /* If there is no bulk associated with this request,
742                          * then we're done and should let the interpreter
743                          * process the reply.  Similarly if the RPC returned
744                          * an error, and therefore the bulk will never arrive.
745                          */
746                         if (req->rq_bulk == NULL || req->rq_status != 0) {
747                                 req->rq_phase = RQ_PHASE_INTERPRET;
748                                 GOTO(interpret, req->rq_status);
749                         }
750
751                         req->rq_phase = RQ_PHASE_BULK;
752                 }
753
754                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
755                 if (ptlrpc_bulk_active(req->rq_bulk))
756                         continue;
757
758                 if (!req->rq_bulk->bd_success) {
759                         /* The RPC reply arrived OK, but the bulk screwed
760                          * up!  Dead weird since the server told us the RPC
761                          * was good after getting the REPLY for her GET or
762                          * the ACK for her PUT. */
763                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
764                         LBUG();
765                 }
766
767                 req->rq_phase = RQ_PHASE_INTERPRET;
768
769         interpret:
770                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
771                 LASSERT(!req->rq_receiving_reply);
772
773                 ptlrpc_unregister_reply(req);
774                 if (req->rq_bulk != NULL)
775                         ptlrpc_unregister_bulk (req);
776
777                 req->rq_phase = RQ_PHASE_COMPLETE;
778
779                 if (req->rq_interpret_reply != NULL) {
780                         int (*interpreter)(struct ptlrpc_request *,void *,int) =
781                                 req->rq_interpret_reply;
782                         req->rq_status = interpreter(req, &req->rq_async_args,
783                                                      req->rq_status);
784                 }
785
786                 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
787                        "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
788                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
789                        req->rq_xid,
790                        imp->imp_connection->c_peer.peer_ni->pni_name,
791                        imp->imp_connection->c_peer.peer_nid,
792                        req->rq_reqmsg->opc);
793
794                 set->set_remaining--;
795         }
796
797         /* If we hit an error, we want to recover promptly. */
798         RETURN(set->set_remaining == 0 || force_timer_recalc);
799 }
800
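/* Handle a timed-out request: mark it, unregister its reply and bulk, and
 * either error it out immediately (fake DLM imports, no-recovery imports,
 * or non-FULL send states) or trigger import recovery.  Returns non-zero
 * if the request was terminated here. */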
801 int ptlrpc_expire_one_request(struct ptlrpc_request *req)
802 {
803         unsigned long      flags;
804         struct obd_import *imp = req->rq_import;
805         ENTRY;
806
807         DEBUG_REQ(D_ERROR, req, "timeout");
808
809         spin_lock_irqsave (&req->rq_lock, flags);
810         req->rq_timedout = 1;
811         spin_unlock_irqrestore (&req->rq_lock, flags);
812
813         ptlrpc_unregister_reply (req);
814
815         if (req->rq_bulk != NULL)
816                 ptlrpc_unregister_bulk (req);
817
818         if (imp == NULL) {
819                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
820                 RETURN(1);
821         }
822
823         /* The DLM server doesn't want recovery run on its imports. */
824         if (imp->imp_dlm_fake)
825                 RETURN(1);
826
827         /* If this request is for recovery or other primordial tasks,
828          * then error it out here. */
829         if (req->rq_send_state != LUSTRE_IMP_FULL || 
830             imp->imp_obd->obd_no_recov) {
831                 spin_lock_irqsave (&req->rq_lock, flags);
832                 req->rq_status = -ETIMEDOUT;
833                 req->rq_err = 1;
834                 spin_unlock_irqrestore (&req->rq_lock, flags);
835                 RETURN(1);
836         }
837
838         ptlrpc_fail_import(imp, req->rq_import_generation);
839
840         RETURN(0);
841 }
842
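/* Timeout callback for a whole set: expire every in-flight request whose
 * deadline has passed, then return 1 so the sleeper wakes and recomputes
 * its timeout. */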
843 int ptlrpc_expired_set(void *data)
844 {
845         struct ptlrpc_request_set *set = data;
846         struct list_head          *tmp;
847         time_t                     now = LTIME_S (CURRENT_TIME);
848         ENTRY;
849
850         LASSERT(set != NULL);
851
852         /* A timeout expired; see which reqs it applies to... */
853         list_for_each (tmp, &set->set_requests) {
854                 struct ptlrpc_request *req =
855                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
856
857                 /* request in-flight? */
858                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting 
859                        && !req->rq_resend) ||
860                       (req->rq_phase == RQ_PHASE_BULK)))
861                         continue;
862
863                 if (req->rq_timedout ||           /* already dealt with */
864                     req->rq_sent + req->rq_timeout > now) /* not expired */
865                         continue;
866
867                 /* deal with this guy */
868                 ptlrpc_expire_one_request (req);
869         }
870
871         /* When waiting for a whole set, we always want to break out of the
872          * sleep so we can recalculate the timeout, or enable interrupts
873          * iff everyone's timed out.
874          */
875         RETURN(1);
876 }
877
878 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
879 {
880         unsigned long flags;
881         spin_lock_irqsave(&req->rq_lock, flags);
882         req->rq_intr = 1;
883         spin_unlock_irqrestore(&req->rq_lock, flags);
884 }
885
886 void ptlrpc_interrupted_set(void *data)
887 {
888         struct ptlrpc_request_set *set = data;
889         struct list_head *tmp;
890
891         LASSERT(set != NULL);
892         CERROR("INTERRUPTED SET %p\n", set);
893
894         list_for_each(tmp, &set->set_requests) {
895                 struct ptlrpc_request *req =
896                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
897
898                 if (req->rq_phase != RQ_PHASE_RPC)
899                         continue;
900
901                 ptlrpc_mark_interrupted(req);
902         }
903 }
904
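/* Work out how many seconds the set may sleep before the next in-flight
 * request hits its deadline; returns 0 when nothing is in flight. */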
905 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
906 {
907         struct list_head      *tmp;
908         time_t                 now = LTIME_S(CURRENT_TIME);
909         time_t                 deadline;
910         int                    timeout = 0;
911         struct ptlrpc_request *req;
912         ENTRY;
913
914         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
915
916         list_for_each(tmp, &set->set_requests) {
917                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
918
919                 /* request in-flight? */
920                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
921                       (req->rq_phase == RQ_PHASE_BULK)))
922                         continue;
923
924                 if (req->rq_timedout)   /* already timed out */
925                         continue;
926
927                 deadline = req->rq_sent + req->rq_timeout;
928                 if (deadline <= now)    /* actually expired already */
929                         timeout = 1;    /* ASAP */
930                 else if (timeout == 0 || timeout > deadline - now)
931                         timeout = deadline - now;
932         }
933         RETURN(timeout);
934 }
935                 
936
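/* Send any still-new requests in the set, then sleep until every request
 * completes, is interrupted, or expires; finally fold the individual
 * statuses through the set's interpret callback, if one is registered. */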
937 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
938 {
939         struct list_head      *tmp;
940         struct ptlrpc_request *req;
941         struct l_wait_info     lwi;
942         int                    rc, timeout;
943         ENTRY;
944
945         LASSERT(!list_empty(&set->set_requests));
946         list_for_each(tmp, &set->set_requests) {
947                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
948                 if (req->rq_phase == RQ_PHASE_NEW)
949                         (void)ptlrpc_send_new_req(req);
950         }
951
952         do {
953                 timeout = ptlrpc_set_next_timeout(set);
954
955                 /* wait until all complete, interrupted, or an in-flight
956                  * req times out */
957                 CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
958                        set, timeout);
959                 lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
960                                        ptlrpc_expired_set, 
961                                        ptlrpc_interrupted_set, set);
962                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
963
964                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
965
966                 /* -EINTR => all requests have been flagged rq_intr so next
967                  * check completes.
968                  * -ETIMEDOUT => someone timed out.  When all reqs have
969                  * timed out, signals are enabled allowing completion with
970                  * EINTR.
971                  * I don't really care if we go once more round the loop in
972                  * the error cases -eeb. */
973         } while (rc != 0);
974
975         LASSERT(set->set_remaining == 0);
976
977         rc = 0;
978         list_for_each(tmp, &set->set_requests) {
979                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
980
981                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
982                 if (req->rq_status != 0)
983                         rc = req->rq_status;
984         }
985
986         if (set->set_interpret != NULL) {
987                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
988                         set->set_interpret;
989                 rc = interpreter (set, &set->set_args, rc);
990         }
991
992         RETURN(rc);
993 }
994
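/* Really free a request and its message buffers, dropping the import or
 * export reference and any bulk descriptor.  'locked' means the caller
 * already holds the import's imp_lock. */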
995 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
996 {
997         ENTRY;
998         if (request == NULL) {
999                 EXIT;
1000                 return;
1001         }
1002
1003         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
1004         LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
1005         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
1006         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
1007
1008         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
1009          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
1010         if (request->rq_import != NULL) {
1011                 unsigned long flags = 0;
1012                 if (!locked)
1013                         spin_lock_irqsave(&request->rq_import->imp_lock, flags);
1014                 list_del_init(&request->rq_replay_list);
1015                 if (!locked)
1016                         spin_unlock_irqrestore(&request->rq_import->imp_lock,
1017                                                flags);
1018         }
1019         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
1020
1021         if (atomic_read(&request->rq_refcount) != 0) {
1022                 DEBUG_REQ(D_ERROR, request,
1023                           "freeing request with nonzero refcount");
1024                 LBUG();
1025         }
1026
1027         if (request->rq_repmsg != NULL) {
1028                 OBD_FREE(request->rq_repmsg, request->rq_replen);
1029                 request->rq_repmsg = NULL;
1030         }
1031         if (request->rq_reqmsg != NULL) {
1032                 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
1033                 request->rq_reqmsg = NULL;
1034         }
1035         if (request->rq_export != NULL) {
1036                 class_export_put(request->rq_export);
1037                 request->rq_export = NULL;
1038         }
1039         if (request->rq_import != NULL) {
1040                 class_import_put(request->rq_import);
1041                 request->rq_import = NULL;
1042         }
1043         if (request->rq_bulk != NULL)
1044                 ptlrpc_free_bulk(request->rq_bulk);
1045
1046         OBD_FREE(request, sizeof(*request));
1047         EXIT;
1048 }
1049
1050 void ptlrpc_free_req(struct ptlrpc_request *request)
1051 {
1052         __ptlrpc_free_req(request, 0);
1053 }
1054
1055 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
1056 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
1057 {
1058 #ifdef CONFIG_SMP
1059         LASSERT(spin_is_locked(&request->rq_import->imp_lock));
1060 #endif
1061         (void)__ptlrpc_req_finished(request, 1);
1062 }
1063
1064 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
1065 {
1066         ENTRY;
1067         if (request == NULL)
1068                 RETURN(1);
1069
1070         if (request == (void *)(unsigned long)(0x5a5a5a5a5a5a5a5a) ||
1071             request->rq_reqmsg == (void *)(unsigned long)(0x5a5a5a5a5a5a5a5a)) {
1072                 CERROR("dereferencing freed request (bug 575)\n");
1073                 LBUG();
1074                 RETURN(1);
1075         }
1076
1077         DEBUG_REQ(D_INFO, request, "refcount now %u",
1078                   atomic_read(&request->rq_refcount) - 1);
1079
1080         if (atomic_dec_and_test(&request->rq_refcount)) {
1081                 __ptlrpc_free_req(request, locked);
1082                 RETURN(1);
1083         }
1084
1085         RETURN(0);
1086 }
1087
1088 void ptlrpc_req_finished(struct ptlrpc_request *request)
1089 {
1090         __ptlrpc_req_finished(request, 0);
1091 }
1092
1093 /* Disengage the client's reply buffer from the network
1094  * NB does _NOT_ unregister any client-side bulk.
1095  * IDEMPOTENT, but _not_ safe against concurrent callers.
1096  * The request owner (i.e. the thread doing the I/O) must call...
1097  */
1098 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
1099 {
1100         int                rc;
1101         wait_queue_head_t *wq;
1102         struct l_wait_info lwi;
1103
1104         LASSERT(!in_interrupt ());             /* might sleep */
1105
1106         if (!ptlrpc_client_receiving_reply(request))
1107                 return;
1108
1109         rc = PtlMDUnlink (request->rq_reply_md_h);
1110         if (rc == PTL_INV_MD) {
1111                 LASSERT (!ptlrpc_client_receiving_reply(request));
1112                 return;
1113         }
1114         
1115         LASSERT (rc == PTL_OK);
1116
1117         if (request->rq_set != NULL)
1118                 wq = &request->rq_set->set_waitq;
1119         else
1120                 wq = &request->rq_reply_waitq;
1121
1122         for (;;) {
1123                 /* Network access will complete in finite time but the HUGE
1124                  * timeout lets us CWARN for visibility of sluggish NALs */
1125                 lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
1126                 rc = l_wait_event (*wq, !ptlrpc_client_receiving_reply(request), &lwi);
1127                 if (rc == 0)
1128                         return;
1129
1130                 LASSERT (rc == -ETIMEDOUT);
1131                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout");
1132         }
1133 }
1134
1135 /* caller must hold imp->imp_lock */
1136 void ptlrpc_free_committed(struct obd_import *imp)
1137 {
1138         struct list_head *tmp, *saved;
1139         struct ptlrpc_request *req;
1140         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1141         ENTRY;
1142
1143         LASSERT(imp != NULL);
1144
1145 #ifdef CONFIG_SMP
1146         LASSERT(spin_is_locked(&imp->imp_lock));
1147 #endif
1148
1149         CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
1150                imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1151
1152         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1153                 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1154
1155                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1156                 LASSERT(req != last_req);
1157                 last_req = req;
1158
1159                 if (req->rq_import_generation < imp->imp_generation) {
1160                         DEBUG_REQ(D_HA, req, "freeing request with old gen");
1161                         GOTO(free_req, 0);
1162                 }
1163
1164                 if (req->rq_replay) {
1165                         DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
1166                         continue;
1167                 }
1168
1169                 /* not yet committed */
1170                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1171                         DEBUG_REQ(D_HA, req, "stopping search");
1172                         break;
1173                 }
1174
1175                 DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
1176                           imp->imp_peer_committed_transno);
1177 free_req:
1178                 if (req->rq_commit_cb != NULL)
1179                         req->rq_commit_cb(req);
1180                 list_del_init(&req->rq_replay_list);
1181                 __ptlrpc_req_finished(req, 1);
1182         }
1183
1184         EXIT;
1185         return;
1186 }
1187
1188 void ptlrpc_cleanup_client(struct obd_import *imp)
1189 {
1190         ENTRY;
1191         EXIT;
1192         return;
1193 }
1194
1195 void ptlrpc_resend_req(struct ptlrpc_request *req)
1196 {
1197         unsigned long flags;
1198
1199         DEBUG_REQ(D_HA, req, "going to resend");
1200         req->rq_reqmsg->handle.cookie = 0;
1201         req->rq_status = -EAGAIN;
1202
1203         spin_lock_irqsave (&req->rq_lock, flags);
1204         req->rq_resend = 1;
1205         req->rq_timedout = 0;
1206         if (req->rq_bulk) {
1207                 __u64 old_xid = req->rq_xid;
1208                 
1209                 /* ensure previous bulk fails */
1210                 req->rq_xid = ptlrpc_next_xid();
1211                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
1212                        old_xid, req->rq_xid);
1213         }
1214         ptlrpc_wake_client_req(req);
1215         spin_unlock_irqrestore (&req->rq_lock, flags);
1216
1217 }
1218
1219 /* XXX: this function and rq_status are currently unused */
1220 void ptlrpc_restart_req(struct ptlrpc_request *req)
1221 {
1222         unsigned long flags;
1223
1224         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1225         req->rq_status = -ERESTARTSYS;
1226
1227         spin_lock_irqsave (&req->rq_lock, flags);
1228         req->rq_restart = 1;
1229         req->rq_timedout = 0;
1230         ptlrpc_wake_client_req(req);
1231         spin_unlock_irqrestore (&req->rq_lock, flags);
1232 }
1233
1234 static int expired_request(void *data)
1235 {
1236         struct ptlrpc_request *req = data;
1237         ENTRY;
1238
1239         RETURN(ptlrpc_expire_one_request(req));
1240 }
1241
1242 static void interrupted_request(void *data)
1243 {
1244         unsigned long flags;
1245
1246         struct ptlrpc_request *req = data;
1247         DEBUG_REQ(D_HA, req, "request interrupted");
1248         spin_lock_irqsave (&req->rq_lock, flags);
1249         req->rq_intr = 1;
1250         spin_unlock_irqrestore (&req->rq_lock, flags);
1251 }
1252
1253 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1254 {
1255         ENTRY;
1256         atomic_inc(&req->rq_refcount);
1257         RETURN(req);
1258 }
1259
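/* Insert a replayable request into the import's replay list, kept sorted
 * by transno (with xid as a secondary key).  Takes a reference that is
 * normally dropped in ptlrpc_free_committed(). */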
1260 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1261                                       struct obd_import *imp)
1262 {
1263         struct list_head *tmp;
1264
1265 #ifdef CONFIG_SMP
1266         LASSERT(spin_is_locked(&imp->imp_lock));
1267 #endif
1268
1269         /* don't re-add requests that have been replayed */
1270         if (!list_empty(&req->rq_replay_list))
1271                 return;
1272
1273         lustre_msg_add_flags(req->rq_reqmsg,
1274                              MSG_REPLAY);
1275
1276         LASSERT(imp->imp_replayable);
1277         /* Balanced in ptlrpc_free_committed, usually. */
1278         ptlrpc_request_addref(req);
1279         list_for_each_prev(tmp, &imp->imp_replay_list) {
1280                 struct ptlrpc_request *iter =
1281                         list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1282
1283                 /* We may have duplicate transnos if we create and then
1284                  * open a file, or for closes retained if to match creating
1285                  * opens, so use req->rq_xid as a secondary key.
1286                  * (See bugs 684, 685, and 428.)
1287                  * XXX no longer needed, but all opens need transnos!
1288                  */
1289                 if (iter->rq_transno > req->rq_transno)
1290                         continue;
1291
1292                 if (iter->rq_transno == req->rq_transno) {
1293                         LASSERT(iter->rq_xid != req->rq_xid);
1294                         if (iter->rq_xid > req->rq_xid)
1295                                 continue;
1296                 }
1297
1298                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
1299                 return;
1300         }
1301
1302         list_add_tail(&req->rq_replay_list, &imp->imp_replay_list);
1303 }
1304
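/* Synchronous RPC: send 'req' and sleep until a reply arrives, handling
 * recovery waits, resends and interrupts along the way.  If the request
 * has a bulk descriptor, wait for the bulk transfer to finish as well.
 * Returns the request status or a negative errno. */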
1305 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1306 {
1307         int rc = 0;
1308         int brc;
1309         struct l_wait_info lwi;
1310         struct obd_import *imp = req->rq_import;
1311         unsigned long flags;
1312         int timeout = 0;
1313         ENTRY;
1314
1315         LASSERT(req->rq_set == NULL);
1316         LASSERT(!req->rq_receiving_reply);
1317
1318         /* for distributed debugging */
1319         req->rq_reqmsg->status = current->pid;
1320         LASSERT(imp->imp_obd != NULL);
1321         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
1322                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1323                imp->imp_obd->obd_uuid.uuid,
1324                req->rq_reqmsg->status, req->rq_xid,
1325                imp->imp_connection->c_peer.peer_ni->pni_name,
1326                imp->imp_connection->c_peer.peer_nid,
1327                req->rq_reqmsg->opc);
1328
1329         /* Mark phase here for a little debug help */
1330         req->rq_phase = RQ_PHASE_RPC;
1331
1332         spin_lock_irqsave(&imp->imp_lock, flags);
1333         req->rq_import_generation = imp->imp_generation;
1334 restart:
1335         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1336                 list_del(&req->rq_list);
1337
1338                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1339                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1340
1341                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
1342                           current->comm, 
1343                           ptlrpc_import_state_name(req->rq_send_state), 
1344                           ptlrpc_import_state_name(imp->imp_state));
1345                 lwi = LWI_INTR(interrupted_request, req);
1346                 rc = l_wait_event(req->rq_reply_waitq,
1347                                   (req->rq_send_state == imp->imp_state ||
1348                                    req->rq_err),
1349                                   &lwi);
1350                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d == 1)",
1351                           current->comm, 
1352                           ptlrpc_import_state_name(imp->imp_state), 
1353                           ptlrpc_import_state_name(req->rq_send_state),
1354                           req->rq_err);
1355
1356                 spin_lock_irqsave(&imp->imp_lock, flags);
1357                 list_del_init(&req->rq_list);
1358
1359                 if (req->rq_err) {
1360                         rc = -EIO;
1361                 } 
1362                 else if (req->rq_intr) {
1363                         rc = -EINTR;
1364                 }
1365                 else if (req->rq_no_resend) {
1366                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1367                         GOTO(out, rc = -ETIMEDOUT);
1368                 }
1369                 else {
1370                         GOTO(restart, rc);
1371                 }
1372         } 
1373
1374         if (rc != 0) {
1375                 list_del_init(&req->rq_list);
1376                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1377                 req->rq_status = rc; // XXX this ok?
1378                 GOTO(out, rc);
1379         }
1380
1381         if (req->rq_resend) {
1382                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
1383
1384                 if (req->rq_bulk != NULL)
1385                         ptlrpc_unregister_bulk (req);
1386
1387                 DEBUG_REQ(D_HA, req, "resending: ");
1388         }
1389
1390         /* XXX this is the same as ptlrpc_set_wait */
1391         LASSERT(list_empty(&req->rq_list));
1392         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1393         spin_unlock_irqrestore(&imp->imp_lock, flags);
1394
1395         rc = ptl_send_rpc(req);
1396         if (rc) {
1397                 DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
1398                 timeout = 1;
1399         } else {
1400                 timeout = MAX(req->rq_timeout * HZ, 1);
1401                 DEBUG_REQ(D_NET, req, "-- sleeping");
1402         }
1403         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
1404                                req);
1405         l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi);
1406         DEBUG_REQ(D_NET, req, "-- done sleeping");
1407
1408         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
1409                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1410                imp->imp_obd->obd_uuid.uuid,
1411                req->rq_reqmsg->status, req->rq_xid,
1412                imp->imp_connection->c_peer.peer_ni->pni_name,
1413                imp->imp_connection->c_peer.peer_nid,
1414                req->rq_reqmsg->opc);
1415
1416         spin_lock_irqsave(&imp->imp_lock, flags);
1417         list_del_init(&req->rq_list);
1418         spin_unlock_irqrestore(&imp->imp_lock, flags);
1419
1420         /* If the reply was received normally, this just grabs the spinlock
1421          * (ensuring the reply callback has returned), sees that
1422          * req->rq_receiving_reply is clear and returns. */
1423         ptlrpc_unregister_reply (req);
1424
1425         if (req->rq_err)
1426                 GOTO(out, rc = -EIO);
1427
1428         /* Resend if we need to, unless we were interrupted. */
1429         if (req->rq_resend && !req->rq_intr) {
1430                 /* ...unless we were specifically told otherwise. */
1431                 if (req->rq_no_resend)
1432                         GOTO(out, rc = -ETIMEDOUT);
1433                 spin_lock_irqsave(&imp->imp_lock, flags);
1434                 goto restart;
1435         }
1436
1437         if (req->rq_intr) {
1438                 /* Should only be interrupted if we timed out. */
1439                 if (!req->rq_timedout)
1440                         DEBUG_REQ(D_ERROR, req,
1441                                   "rq_intr set but rq_timedout not");
1442                 GOTO(out, rc = -EINTR);
1443         }
1444
1445         if (req->rq_timedout) {                 /* non-recoverable timeout */
1446                 GOTO(out, rc = -ETIMEDOUT);
1447         }
1448
1449         if (!req->rq_replied) {
1450                 /* How can this be? -eeb */
1451                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
1452                 LBUG();
1453                 GOTO(out, rc = req->rq_status);
1454         }
1455
1456         rc = after_reply (req);
1457         /* NB may return +ve success rc */
1458         if (req->rq_resend) {
1459                 spin_lock_irqsave(&imp->imp_lock, flags);
1460                 goto restart;
1461         }
1462
1463  out:
1464         if (req->rq_bulk != NULL) {
1465                 if (rc >= 0) {
1466                         /* Success so far.  Note that anything going wrong
1467                          * with bulk now is EXTREMELY strange, since the
1468                          * server must have believed that the bulk
1469                          * transferred OK before it replied with success to
1470                          * us. */
1471                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
1472                         brc = l_wait_event(req->rq_reply_waitq,
1473                                            !ptlrpc_bulk_active(req->rq_bulk),
1474                                            &lwi);
1475                         LASSERT(brc == 0 || brc == -ETIMEDOUT);
1476                         if (brc != 0) {
1477                                 LASSERT(brc == -ETIMEDOUT);
1478                                 DEBUG_REQ(D_ERROR, req, "bulk timed out");
1479                                 rc = brc;
1480                         } else if (!req->rq_bulk->bd_success) {
1481                                 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1482                                 rc = -EIO;
1483                         }
1484                 }
1485                 if (rc < 0)
1486                         ptlrpc_unregister_bulk (req);
1487         }
1488
1489         LASSERT(!req->rq_receiving_reply);
1490         req->rq_phase = RQ_PHASE_INTERPRET;
1491         RETURN(rc);
1492 }
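
/*
 * Illustrative sketch only (not part of the original source): a typical
 * synchronous caller of the send-and-wait path above (ptlrpc_queue_wait()).
 * It assumes the usual prep/cleanup helpers from this file,
 * ptlrpc_prep_req() and ptlrpc_req_finished(), and uses OBD_PING as a
 * placeholder opcode; real callers size their request and reply buffers
 * for their own opcode.
 */
#if 0
static int example_sync_rpc(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        int rc;

        /* No request body and an empty reply buffer, as for a ping. */
        req = ptlrpc_prep_req(imp, OBD_PING, 0, NULL, NULL);
        if (req == NULL)
                return -ENOMEM;
        req->rq_replen = lustre_msg_size(0, NULL);

        rc = ptlrpc_queue_wait(req);    /* send and block for the reply */

        ptlrpc_req_finished(req);       /* drop our reference */
        return rc;
}
#endif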
1493
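/* Scratch state overlaid on rq_async_args for a replayed request:
 * ptlrpc_replay_req() saves the current send state and reply status here
 * before handing the request to ptlrpcd, and ptlrpc_replay_interpret()
 * restores them once the replay completes. */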
1494 struct ptlrpc_replay_async_args {
1495         int praa_old_state;
1496         int praa_old_status;
1497 };
1498
1499 static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
1500                                     void * data, int rc)
1501 {
1502         struct ptlrpc_replay_async_args *aa = data;
1503         struct obd_import *imp = req->rq_import;
1504         unsigned long flags;
1505
1506         atomic_dec(&imp->imp_replay_inflight);
1507         
1508         if (!req->rq_replied) {
1509                 CERROR("request replay timed out, restarting recovery\n");
1510                 GOTO(out, rc = -ETIMEDOUT);
1511         }
1512
1513 #if SWAB_PARANOIA
1514         /* Clear reply swab mask; this is a new reply in sender's byte order */
1515         req->rq_rep_swab_mask = 0;
1516 #endif
1517         LASSERT (req->rq_nob_received <= req->rq_replen);
1518         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
1519         if (rc) {
1520                 CERROR("unpack_rep failed: %d\n", rc);
1521                 GOTO(out, rc = -EPROTO);
1522         }
1523
1524         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR && 
1525             req->rq_repmsg->status == -ENOTCONN) 
1526                 GOTO(out, rc = req->rq_repmsg->status);
1527
1528         /* The transno had better not change over replay. */
1529         LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);
1530
1531         DEBUG_REQ(D_HA, req, "got rep");
1532
1533         /* let the callback do fixups, possibly including fixups to the request itself */
1534         if (req->rq_replay_cb)
1535                 req->rq_replay_cb(req);
1536
1537         if (req->rq_replied && req->rq_repmsg->status != aa->praa_old_status) {
1538                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
1539                           req->rq_repmsg->status, aa->praa_old_status);
1540         } else {
1541                 /* Put it back for re-replay. */
1542                 req->rq_repmsg->status = aa->praa_old_status;
1543         }
1544
1545         spin_lock_irqsave(&imp->imp_lock, flags);
1546         imp->imp_last_replay_transno = req->rq_transno;
1547         spin_unlock_irqrestore(&imp->imp_lock, flags);
1548
1549         /* continue with recovery */
1550         rc = ptlrpc_import_recovery_state_machine(imp);
1551  out:
1552         req->rq_send_state = aa->praa_old_state;
1553         
1554         if (rc != 0)
1555                 /* this replay failed, so restart recovery */
1556                 ptlrpc_connect_import(imp, NULL);
1557
1558         RETURN(rc);
1559 }
1560
1561
1562 int ptlrpc_replay_req(struct ptlrpc_request *req)
1563 {
1564         struct ptlrpc_replay_async_args *aa;
1565         ENTRY;
1566
1567         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
1568
1569         /* Not handling automatic bulk replay yet (or ever?) */
1570         LASSERT(req->rq_bulk == NULL);
1571
1572         DEBUG_REQ(D_HA, req, "REPLAY");
1573
1574         LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
1575         aa = (struct ptlrpc_replay_async_args *)&req->rq_async_args;
1576         memset(aa, 0, sizeof *aa);
1577
1578         /* Prepare request to be resent with ptlrpcd */
1579         aa->praa_old_state = req->rq_send_state;
1580         req->rq_send_state = LUSTRE_IMP_REPLAY;
1581         req->rq_phase = RQ_PHASE_NEW;
1582         /*
1583          * Q: "How can a req get on the replay list if it wasn't replied?"
1584          * A: "If we failed during the replay of this request, it will still
1585          *     be on the list, but rq_replied will have been reset to 0."
1586          */
1587         if (req->rq_replied) {
1588                 aa->praa_old_status = req->rq_repmsg->status;
1589                 req->rq_status = 0;
1590                 req->rq_replied = 0;
1591         }
1592
1593         req->rq_interpret_reply = ptlrpc_replay_interpret;
1594         atomic_inc(&req->rq_import->imp_replay_inflight);
1595         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
1596
1597         ptlrpcd_add_req(req);
1598         RETURN(0);
1599 }
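
/*
 * Illustrative sketch only (not part of the original source): how recovery
 * code might pick the next request off the import's replay list and hand it
 * to ptlrpc_replay_req().  The real selection logic lives in the import
 * recovery code; the rq_list linkage assumed here matches how the sending
 * and delayed lists are walked in ptlrpc_abort_inflight() below.
 */
#if 0
static int example_replay_next(struct obd_import *imp)
{
        struct list_head *tmp;
        struct ptlrpc_request *req;

        list_for_each(tmp, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                /* Replay in transno order, resuming after the last transno
                 * already replayed to the server. */
                if (req->rq_transno > imp->imp_last_replay_transno)
                        return ptlrpc_replay_req(req);
        }
        return 0;       /* nothing left to replay */
}
#endif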
1600
1601 void ptlrpc_abort_inflight(struct obd_import *imp)
1602 {
1603         unsigned long flags;
1604         struct list_head *tmp, *n;
1605         ENTRY;
1606
1607         /* Make sure that no new requests get processed for this import.
1608          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
1609          * this flag and then putting requests on sending_list or delayed_list.
1610          */
1611         spin_lock_irqsave(&imp->imp_lock, flags);
1612
1613         /* XXX locking?  Maybe we should remove each request with the list
1614          * locked?  Also, how do we know if the requests on the list are
1615          * being freed at this time?
1616          */
1617         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
1618                 struct ptlrpc_request *req =
1619                         list_entry(tmp, struct ptlrpc_request, rq_list);
1620
1621                 DEBUG_REQ(D_HA, req, "inflight");
1622
1623                 spin_lock (&req->rq_lock);
1624                 if (req->rq_import_generation < imp->imp_generation) {
1625                         req->rq_err = 1;
1626                         ptlrpc_wake_client_req(req);
1627                 }
1628                 spin_unlock (&req->rq_lock);
1629         }
1630
1631         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
1632                 struct ptlrpc_request *req =
1633                         list_entry(tmp, struct ptlrpc_request, rq_list);
1634
1635                 DEBUG_REQ(D_HA, req, "aborting waiting req");
1636
1637                 spin_lock (&req->rq_lock);
1638                 if (req->rq_import_generation < imp->imp_generation) {
1639                         req->rq_err = 1;
1640                         ptlrpc_wake_client_req(req);
1641                 }
1642                 spin_unlock (&req->rq_lock);
1643         }
1644
1645         /* Last chance to free reqs left on the replay list, but we
1646          * will still leak reqs that haven't committed.  */
1647         if (imp->imp_replayable)
1648                 ptlrpc_free_committed(imp);
1649
1650         spin_unlock_irqrestore(&imp->imp_lock, flags);
1651
1652         EXIT;
1653 }
1654
1655 static __u64 ptlrpc_last_xid = 0;
1656 static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;
1657
1658 __u64 ptlrpc_next_xid(void)
1659 {
1660         __u64 tmp;
1661         spin_lock(&ptlrpc_last_xid_lock);
1662         tmp = ++ptlrpc_last_xid;
1663         spin_unlock(&ptlrpc_last_xid_lock);
1664         return tmp;
1665 }
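
/*
 * Illustrative note (not part of the original source): each request is
 * stamped with a fresh xid when it is prepared, e.g.
 *
 *         req->rq_xid = ptlrpc_next_xid();
 *
 * so the counter above keeps xids unique across all imports on this client.
 */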
1666
1667