[fs/lustre-release.git] / lustre / ptlrpc / client.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <errno.h>
26 #include <signal.h>
27 #include <liblustre.h>
28 #endif
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/lustre_lib.h>
33 #include <linux/lustre_ha.h>
34 #include <linux/lustre_import.h>
35
36 #include "ptlrpc_internal.h"
37
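/* Fill in a ptlrpc_client with its request and reply portals and a
 * human-readable name. */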
38 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
39                         struct ptlrpc_client *cl)
40 {
41         cl->cli_request_portal = req_portal;
42         cl->cli_reply_portal   = rep_portal;
43         cl->cli_name           = name;
44 }
45
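/* Resolve @uuid to a peer and return a connection to it, or NULL if the
 * peer cannot be found; the connection's remote uuid is updated on success. */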
46 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
47 {
48         struct ptlrpc_connection *c;
49         struct ptlrpc_peer peer;
50         int err;
51
52         err = ptlrpc_uuid_to_peer(uuid, &peer);
53         if (err != 0) {
54                 CERROR("cannot find peer %s!\n", uuid->uuid);
55                 return NULL;
56         }
57
58         c = ptlrpc_get_connection(&peer, uuid);
59         if (c) {
60                 memcpy(c->c_remote_uuid.uuid,
61                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
62         }
63
64         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
65
66         return c;
67 }
68
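/* Re-resolve @uuid and overwrite the connection's peer address in place;
 * logs an error and leaves the connection untouched if the peer cannot be
 * found. */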
69 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
70                                  struct obd_uuid *uuid)
71 {
72         struct ptlrpc_peer peer;
73         int err;
74
75         err = ptlrpc_uuid_to_peer(uuid, &peer);
76         if (err != 0) {
77                 CERROR("cannot find peer %s!\n", uuid->uuid);
78                 return;
79         }
80
81         memcpy(&conn->c_peer, &peer, sizeof (peer));
82         return;
83 }
84
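/* Allocate a bulk descriptor with room for @npages iov entries and
 * initialize its lock, waitqueue, portal and type. */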
85 static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
86 {
87         struct ptlrpc_bulk_desc *desc;
88
89         OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages]));
90         if (!desc)
91                 return NULL;
92
93         spin_lock_init(&desc->bd_lock);
94         init_waitqueue_head(&desc->bd_waitq);
95         desc->bd_max_iov = npages;
96         desc->bd_iov_count = 0;
97         desc->bd_md_h = PTL_INVALID_HANDLE;
98         desc->bd_portal = portal;
99         desc->bd_type = type;
100         
101         return desc;
102 }
103
104 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
105                                                int npages, int type, int portal)
106 {
107         struct obd_import *imp = req->rq_import;
108         struct ptlrpc_bulk_desc *desc;
109
110         LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
111         desc = new_bulk(npages, type, portal);
112         if (desc == NULL)
113                 RETURN(NULL);
114
115         desc->bd_import_generation = req->rq_import_generation;
116         desc->bd_import = class_import_get(imp);
117         desc->bd_req = req;
118
119         desc->bd_cbid.cbid_fn  = client_bulk_callback;
120         desc->bd_cbid.cbid_arg = desc;
121
122         /* This makes the req own the desc, which is freed when the req itself is freed */
123         req->rq_bulk = desc;
124
125         return desc;
126 }
127
128 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
129                                                int npages, int type, int portal)
130 {
131         struct obd_export *exp = req->rq_export;
132         struct ptlrpc_bulk_desc *desc;
133
134         LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
135
136         desc = new_bulk(npages, type, portal);
137         if (desc == NULL)
138                 RETURN(NULL);
139
140         desc->bd_export = class_export_get(exp);
141         desc->bd_req = req;
142
143         desc->bd_cbid.cbid_fn  = server_bulk_callback;
144         desc->bd_cbid.cbid_arg = desc;
145
146         /* NB we don't assign rq_bulk here; server-side requests are
147          * re-used, and the handler frees the bulk desc explicitly. */
148
149         return desc;
150 }
151
152 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
153                            struct page *page, int pageoffset, int len)
154 {
155         LASSERT(desc->bd_iov_count < desc->bd_max_iov);
156         LASSERT(page != NULL);
157         LASSERT(pageoffset >= 0);
158         LASSERT(len > 0);
159         LASSERT(pageoffset + len <= PAGE_SIZE);
160
161         desc->bd_nob += len;
162
163         ptlrpc_add_bulk_page(desc, page, pageoffset, len);
164 }
165
166 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
167 {
168         ENTRY;
169
170         LASSERT(desc != NULL);
171         LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
172         LASSERT(!desc->bd_network_rw);         /* network hands off */
173         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
174         if (desc->bd_export)
175                 class_export_put(desc->bd_export);
176         else
177                 class_import_put(desc->bd_import);
178
179         OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, 
180                                 bd_iov[desc->bd_max_iov]));
181         EXIT;
182 }
183
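/* Allocate and pack a new request on @imp.  The caller supplies the message
 * version, opcode and request buffer layout; the request takes a reference
 * on the import and starts life in RQ_PHASE_NEW with a fresh xid. */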
184 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
185                                        int opcode, int count, int *lengths,
186                                        char **bufs)
187 {
188         struct ptlrpc_request *request;
189         int rc;
190         ENTRY;
191
192         LASSERT((unsigned long)imp > 0x1000);
193
194         OBD_ALLOC(request, sizeof(*request));
195         if (!request) {
196                 CERROR("request allocation out of memory\n");
197                 RETURN(NULL);
198         }
199
200         rc = lustre_pack_request(request, count, lengths, bufs);
201         if (rc) {
202                 CERROR("cannot pack request %d\n", rc);
203                 OBD_FREE(request, sizeof(*request));
204                 RETURN(NULL);
205         }
206         request->rq_reqmsg->version |= version;
207
208         if (imp->imp_server_timeout)
209                 request->rq_timeout = obd_timeout / 2;
210         else
211                 request->rq_timeout = obd_timeout;
212
213         request->rq_send_state = LUSTRE_IMP_FULL;
214         request->rq_type = PTL_RPC_MSG_REQUEST;
215         request->rq_import = class_import_get(imp);
216
217         request->rq_req_cbid.cbid_fn  = request_out_callback;
218         request->rq_req_cbid.cbid_arg = request;
219
220         request->rq_reply_cbid.cbid_fn  = reply_in_callback;
221         request->rq_reply_cbid.cbid_arg = request;
222         
223         request->rq_phase = RQ_PHASE_NEW;
224
225         /* XXX FIXME bug 249 */
226         request->rq_request_portal = imp->imp_client->cli_request_portal;
227         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
228
229         spin_lock_init(&request->rq_lock);
230         INIT_LIST_HEAD(&request->rq_list);
231         INIT_LIST_HEAD(&request->rq_replay_list);
232         INIT_LIST_HEAD(&request->rq_set_chain);
233         init_waitqueue_head(&request->rq_reply_waitq);
234         request->rq_xid = ptlrpc_next_xid();
235         atomic_set(&request->rq_refcount, 1);
236
237         request->rq_reqmsg->opc = opcode;
238         request->rq_reqmsg->flags = 0;
239         RETURN(request);
240 }
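/*
 * Typical usage (illustrative sketch; the opcode, buffer lengths and the
 * version constant below are hypothetical placeholders, and error handling
 * is elided):
 *
 *      int lengths[1] = { sizeof(struct hypothetical_body) };
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, version, opcode, 1, lengths, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      req->rq_replen = lustre_msg_size(1, lengths);   // expected reply size
 *      rc = ptlrpc_queue_wait(req);                    // send and block for the reply
 *      ptlrpc_req_finished(req);                       // drop our reference
 */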
241
242 struct ptlrpc_request_set *ptlrpc_prep_set(void)
243 {
244         struct ptlrpc_request_set *set;
245
246         OBD_ALLOC(set, sizeof *set);
247         if (!set)
248                 RETURN(NULL);
249         INIT_LIST_HEAD(&set->set_requests);
250         init_waitqueue_head(&set->set_waitq);
251         set->set_remaining = 0;
252         spin_lock_init(&set->set_new_req_lock);
253         INIT_LIST_HEAD(&set->set_new_requests);
254
255         RETURN(set);
256 }
257
258 /* Finish with this set; opposite of prep_set. */
259 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
260 {
261         struct list_head *tmp;
262         struct list_head *next;
263         int               expected_phase;
264         int               n = 0;
265         ENTRY;
266
267         /* Requests on the set should either all be completed, or all be new */
268         expected_phase = (set->set_remaining == 0) ?
269                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
270         list_for_each (tmp, &set->set_requests) {
271                 struct ptlrpc_request *req =
272                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
273
274                 LASSERT(req->rq_phase == expected_phase);
275                 n++;
276         }
277
278         LASSERT(set->set_remaining == 0 || set->set_remaining == n);
279
280         list_for_each_safe(tmp, next, &set->set_requests) {
281                 struct ptlrpc_request *req =
282                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
283                 list_del_init(&req->rq_set_chain);
284
285                 LASSERT(req->rq_phase == expected_phase);
286
287                 if (req->rq_phase == RQ_PHASE_NEW) {
288
289                         if (req->rq_interpret_reply != NULL) {
290                                 int (*interpreter)(struct ptlrpc_request *,
291                                                    void *, int) =
292                                         req->rq_interpret_reply;
293
294                                 /* higher level (i.e. LOV) failed;
295                                  * let the sub reqs clean up */
296                                 req->rq_status = -EBADR;
297                                 interpreter(req, &req->rq_async_args,
298                                             req->rq_status);
299                         }
300                         set->set_remaining--;
301                 }
302
303                 req->rq_set = NULL;
304                 ptlrpc_req_finished (req);
305         }
306
307         LASSERT(set->set_remaining == 0);
308
309         OBD_FREE(set, sizeof(*set));
310         EXIT;
311 }
312
313 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
314                         struct ptlrpc_request *req)
315 {
316         /* The set takes over the caller's request reference */
317         list_add_tail(&req->rq_set_chain, &set->set_requests);
318         req->rq_set = set;
319         set->set_remaining++;
320         atomic_inc(&req->rq_import->imp_inflight);
321 }
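/*
 * Typical usage of the request-set interface (illustrative sketch; req1 and
 * req2 stand for requests already built with ptlrpc_prep_req(), and error
 * handling is elided):
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *      ptlrpc_set_add_req(set, req1);  // the set now owns these references
 *      ptlrpc_set_add_req(set, req2);
 *      rc = ptlrpc_set_wait(set);      // send everything and wait for completion
 *      ptlrpc_set_destroy(set);        // finishes every request in the set
 */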
322
323 /* Locked so that many callers can add things; the context that owns the set
324  * is supposed to notice these and move them into the set proper. */
325 void ptlrpc_set_add_new_req(struct ptlrpc_request_set *set,
326                             struct ptlrpc_request *req)
327 {
328         unsigned long flags;
329         spin_lock_irqsave(&set->set_new_req_lock, flags);
330         /* The set takes over the caller's request reference */
331         list_add_tail(&req->rq_set_chain, &set->set_new_requests);
332         req->rq_set = set;
333         spin_unlock_irqrestore(&set->set_new_req_lock, flags);
334 }
335
336 /*
337  * Based on the current state of the import, determine if the request
338  * can be sent, is an error, or should be delayed.
339  *
340  * Returns true if this request should be delayed. If false and
341  * *status is non-zero, then the request cannot be sent and *status is the
342  * error code.  If false and *status is 0, then the request can be sent.
343  *
344  * The imp->imp_lock must be held.
345  */
346 static int ptlrpc_import_delay_req(struct obd_import *imp, 
347                                    struct ptlrpc_request *req, int *status)
348 {
349         int delay = 0;
350         ENTRY;
351
352         LASSERT (status != NULL);
353         *status = 0;
354
355         if (imp->imp_state == LUSTRE_IMP_NEW) {
356                 DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
357                 *status = -EIO;
358                 LBUG();
359         }
360         else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
361                 DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
362                 *status = -EIO;
363         }
364         /* allow CONNECT even if import is invalid */
365         else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
366                  imp->imp_state == LUSTRE_IMP_CONNECTING) {
367                 ;
368         }
369         /*
370          * If the import has been invalidated (such as by an OST failure), the
371          * request must fail with -EIO.  
372          */
373         else if (imp->imp_invalid) {
374                 DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
375                 *status = -EIO;
376         } 
377         else if (req->rq_import_generation != imp->imp_generation) {
378                 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
379                 *status = -EIO;
380         } 
381         else if (req->rq_send_state != imp->imp_state) {
382                 if (imp->imp_obd->obd_no_recov || imp->imp_dlm_fake 
383                     || req->rq_no_delay) 
384                         *status = -EWOULDBLOCK;
385                 else
386                         delay = 1;
387         }
388
389         RETURN(delay);
390 }
391
392 static int ptlrpc_check_reply(struct ptlrpc_request *req)
393 {
394         unsigned long flags;
395         int rc = 0;
396         ENTRY;
397
398         /* serialise with network callback */
399         spin_lock_irqsave (&req->rq_lock, flags);
400
401         if (req->rq_replied) {
402                 DEBUG_REQ(D_NET, req, "REPLIED:");
403                 GOTO(out, rc = 1);
404         }
405         
406         if (req->rq_net_err && !req->rq_timedout) {
407                 spin_unlock_irqrestore (&req->rq_lock, flags);
408                 rc = ptlrpc_expire_one_request(req); 
409                 spin_lock_irqsave (&req->rq_lock, flags);
410                 GOTO(out, rc);
411         }
412
413         if (req->rq_err) {
414                 DEBUG_REQ(D_ERROR, req, "ABORTED:");
415                 GOTO(out, rc = 1);
416         }
417
418         if (req->rq_resend) {
419                 DEBUG_REQ(D_ERROR, req, "RESEND:");
420                 GOTO(out, rc = 1);
421         }
422
423         if (req->rq_restart) {
424                 DEBUG_REQ(D_ERROR, req, "RESTART:");
425                 GOTO(out, rc = 1);
426         }
427         EXIT;
428  out:
429         spin_unlock_irqrestore (&req->rq_lock, flags);
430         DEBUG_REQ(D_NET, req, "rc = %d for", rc);
431         return rc;
432 }
433
434 static int ptlrpc_check_status(struct ptlrpc_request *req)
435 {
436         int err;
437         ENTRY;
438
439         err = req->rq_repmsg->status;
440         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
441                 DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR, err == %d", 
442                           err);
443                 RETURN(err < 0 ? err : -EINVAL);
444         }
445
446         if (err < 0) {
447                 DEBUG_REQ(D_INFO, req, "status is %d", err);
448         } else if (err > 0) {
449                 /* XXX: translate this error from net to host */
450                 DEBUG_REQ(D_INFO, req, "status is %d", err);
451         }
452
453         RETURN(err);
454 }
455
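/* Process a newly arrived reply: unpack and sanity-check the message, handle
 * -ENOTCONN by kicking reconnection, and on a replayable import record the
 * transno and last_committed information needed for replay. */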
456 static int after_reply(struct ptlrpc_request *req)
457 {
458         unsigned long flags;
459         struct obd_import *imp = req->rq_import;
460         int rc;
461         ENTRY;
462
463         LASSERT(!req->rq_receiving_reply);
464
465         /* NB Until this point, the whole of the incoming message,
466          * including buflens, status etc is in the sender's byte order. */
467
468 #if SWAB_PARANOIA
469         /* Clear reply swab mask; this is a new reply in sender's byte order */
470         req->rq_rep_swab_mask = 0;
471 #endif
472         LASSERT (req->rq_nob_received <= req->rq_replen);
473         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
474         if (rc) {
475                 CERROR("unpack_rep failed: %d\n", rc);
476                 RETURN(-EPROTO);
477         }
478
479         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
480             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
481                 CERROR("invalid packet type received (type=%u)\n",
482                        req->rq_repmsg->type);
483                 RETURN(-EPROTO);
484         }
485
486         rc = ptlrpc_check_status(req);
487
488         /* Either we've been evicted, or the server has failed for
489          * some reason. Try to reconnect, and if that fails, punt to the
490          * upcall. */
491         if (rc == -ENOTCONN) {
492                 if (req->rq_send_state != LUSTRE_IMP_FULL ||
493                     imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
494                         RETURN(-ENOTCONN);
495                 }
496
497                 ptlrpc_request_handle_notconn(req);
498
499                 RETURN(rc);
500         }
501
502         /* Store transno in reqmsg for replay. */
503         req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;
504
505
506         if (req->rq_import->imp_replayable) {
507                 spin_lock_irqsave(&imp->imp_lock, flags);
508                 if (req->rq_replay || req->rq_transno != 0)
509                         ptlrpc_retain_replayable_request(req, imp);
510                 else if (req->rq_commit_cb != NULL) {
511                         spin_unlock_irqrestore(&imp->imp_lock, flags);
512                         req->rq_commit_cb(req);
513                         spin_lock_irqsave(&imp->imp_lock, flags);
514                 }
515
516                 if (req->rq_transno > imp->imp_max_transno)
517                         imp->imp_max_transno = req->rq_transno;
518
519                 /* Replay-enabled imports return commit-status information. */
520                 if (req->rq_repmsg->last_committed)
521                         imp->imp_peer_committed_transno =
522                                 req->rq_repmsg->last_committed;
523                 ptlrpc_free_committed(imp);
524                 spin_unlock_irqrestore(&imp->imp_lock, flags);
525         }
526
527         RETURN(rc);
528 }
529
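/* Move a NEW request into the RPC phase and send it.  If the import is not
 * ready, the request is parked on the import's delayed list; if the import
 * refuses it outright, the request goes straight to the INTERPRET phase. */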
530 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
531 {
532         char                   str[PTL_NALFMT_SIZE];
533         struct obd_import     *imp;
534         unsigned long          flags;
535         int rc;
536         ENTRY;
537
538         LASSERT(req->rq_phase == RQ_PHASE_NEW);
539         req->rq_phase = RQ_PHASE_RPC;
540
541         imp = req->rq_import;
542         spin_lock_irqsave(&imp->imp_lock, flags);
543
544         req->rq_import_generation = imp->imp_generation;
545
546         if (ptlrpc_import_delay_req(imp, req, &rc)) {
547                 spin_lock (&req->rq_lock);
548                 req->rq_waiting = 1;
549                 spin_unlock (&req->rq_lock);
550
551                 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
552                           "(%s != %s)",
553                           req->rq_reqmsg->status, 
554                           ptlrpc_import_state_name(req->rq_send_state),
555                           ptlrpc_import_state_name(imp->imp_state));
556                 LASSERT(list_empty (&req->rq_list));
557
558                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
559                 spin_unlock_irqrestore(&imp->imp_lock, flags);
560                 RETURN(0);
561         }
562
563         if (rc != 0) {
564                 spin_unlock_irqrestore(&imp->imp_lock, flags);
565                 req->rq_status = rc;
566                 req->rq_phase = RQ_PHASE_INTERPRET;
567                 RETURN(rc);
568         }
569
570         /* XXX this is the same as ptlrpc_queue_wait */
571         LASSERT(list_empty(&req->rq_list));
572         list_add_tail(&req->rq_list, &imp->imp_sending_list);
573         spin_unlock_irqrestore(&imp->imp_lock, flags);
574
575         req->rq_reqmsg->status = current->pid;
576         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
577                " %s:%s:%d:"LPU64":%s:%s:%d\n", current->comm,
578                imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
579                req->rq_xid,
580                imp->imp_connection->c_peer.peer_ni->pni_name,
581                ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
582                req->rq_reqmsg->opc);
583
584         rc = ptl_send_rpc(req);
585         if (rc) {
586                 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
587                 req->rq_net_err = 1;
588                 RETURN(rc);
589         }
590         RETURN(0);
591 }
592
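/* Advance every request in the set through its state machine (send, resend,
 * reply handling, bulk completion, interpretation).  Returns non-zero when
 * the whole set has completed or the caller should recalculate its timeout. */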
593 int ptlrpc_check_set(struct ptlrpc_request_set *set)
594 {
595         char str[PTL_NALFMT_SIZE];
596         unsigned long flags;
597         struct list_head *tmp;
598         int force_timer_recalc = 0;
599         ENTRY;
600
601         if (set->set_remaining == 0)
602                 RETURN(1);
603
604         list_for_each(tmp, &set->set_requests) {
605                 struct ptlrpc_request *req =
606                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
607                 struct obd_import *imp = req->rq_import;
608                 int rc = 0;
609
610                 if (req->rq_phase == RQ_PHASE_NEW &&
611                     ptlrpc_send_new_req(req)) {
612                         force_timer_recalc = 1;
613                 }
614
615                 if (!(req->rq_phase == RQ_PHASE_RPC ||
616                       req->rq_phase == RQ_PHASE_BULK ||
617                       req->rq_phase == RQ_PHASE_INTERPRET ||
618                       req->rq_phase == RQ_PHASE_COMPLETE)) {
619                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
620                         LBUG();
621                 }
622
623                 if (req->rq_phase == RQ_PHASE_COMPLETE)
624                         continue;
625
626                 if (req->rq_phase == RQ_PHASE_INTERPRET)
627                         GOTO(interpret, req->rq_status);
628
629                 if (req->rq_net_err && !req->rq_timedout)
630                         ptlrpc_expire_one_request(req); 
631
632                 if (req->rq_err) {
633                         ptlrpc_unregister_reply(req);
634                         if (req->rq_status == 0)
635                                 req->rq_status = -EIO;
636                         req->rq_phase = RQ_PHASE_INTERPRET;
637
638                         spin_lock_irqsave(&imp->imp_lock, flags);
639                         list_del_init(&req->rq_list);
640                         spin_unlock_irqrestore(&imp->imp_lock, flags);
641
642                         GOTO(interpret, req->rq_status);
643                 }
644
645                 /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
646                  * will only be set after rq_timedout, but the oig waiting
647                  * path sets rq_intr irrespective of whether ptlrpcd has
648                  * seen a timeout.  Our policy is to only interpret
649                  * interrupted RPCs after they have timed out. */
650                 if (req->rq_intr && (req->rq_timedout || req->rq_waiting)) {
651                         /* NB could be on delayed list */
652                         ptlrpc_unregister_reply(req);
653                         req->rq_status = -EINTR;
654                         req->rq_phase = RQ_PHASE_INTERPRET;
655
656                         spin_lock_irqsave(&imp->imp_lock, flags);
657                         list_del_init(&req->rq_list);
658                         spin_unlock_irqrestore(&imp->imp_lock, flags);
659
660                         GOTO(interpret, req->rq_status);
661                 }
662
663                 if (req->rq_phase == RQ_PHASE_RPC) {
664                         if (req->rq_waiting || req->rq_resend) {
665                                 int status;
666
667                                 ptlrpc_unregister_reply(req);
668
669                                 spin_lock_irqsave(&imp->imp_lock, flags);
670
671                                 if (ptlrpc_import_delay_req(imp, req, &status)){
672                                         spin_unlock_irqrestore(&imp->imp_lock,
673                                                                flags);
674                                         continue;
675                                 } 
676
677                                 list_del_init(&req->rq_list);
678                                 if (status != 0)  {
679                                         req->rq_status = status;
680                                         req->rq_phase = RQ_PHASE_INTERPRET;
681                                         spin_unlock_irqrestore(&imp->imp_lock,
682                                                                flags);
683                                         GOTO(interpret, req->rq_status);
684                                 }
685                                 if (req->rq_no_resend) {
686                                         req->rq_status = -ENOTCONN;
687                                         req->rq_phase = RQ_PHASE_INTERPRET;
688                                         spin_unlock_irqrestore(&imp->imp_lock,
689                                                                flags);
690                                         GOTO(interpret, req->rq_status);
691                                 }
692                                 list_add_tail(&req->rq_list,
693                                               &imp->imp_sending_list);
694
695                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
696
697                                 req->rq_waiting = 0;
698                                 if (req->rq_resend) {
699                                         lustre_msg_add_flags(req->rq_reqmsg,
700                                                              MSG_RESENT);
701                                         if (req->rq_bulk) {
702                                                 __u64 old_xid = req->rq_xid;
703
704                                                 ptlrpc_unregister_bulk (req);
705
706                                                 /* ensure previous bulk fails */
707                                                 req->rq_xid = ptlrpc_next_xid();
708                                                 CDEBUG(D_HA, "resend bulk "
709                                                        "old x"LPU64
710                                                        " new x"LPU64"\n",
711                                                        old_xid, req->rq_xid);
712                                         }
713                                 }
714
715                                 rc = ptl_send_rpc(req);
716                                 if (rc) {
717                                         DEBUG_REQ(D_HA, req, "send failed (%d)",
718                                                   rc);
719                                         force_timer_recalc = 1;
720                                         req->rq_net_err = 1;
721                                 }
722                                 /* need to reset the timeout */
723                                 force_timer_recalc = 1;
724                         }
725
726                         /* Still waiting for a reply? */
727                         if (ptlrpc_client_receiving_reply(req))
728                                 continue;
729
730                         /* Did we actually receive a reply? */
731                         if (!ptlrpc_client_replied(req))
732                                 continue;
733
734                         spin_lock_irqsave(&imp->imp_lock, flags);
735                         list_del_init(&req->rq_list);
736                         spin_unlock_irqrestore(&imp->imp_lock, flags);
737
738                         req->rq_status = after_reply(req);
739                         if (req->rq_resend) {
740                                 /* Add this req to the delayed list so
741                                    it can be errored if the import is
742                                    evicted after recovery. */
743                                 spin_lock_irqsave (&req->rq_lock, flags);
744                                 list_add_tail(&req->rq_list, 
745                                               &imp->imp_delayed_list);
746                                 spin_unlock_irqrestore(&req->rq_lock, flags);
747                                 continue;
748                         }
749
750                         /* If there is no bulk associated with this request,
751                          * then we're done and should let the interpreter
752                          * process the reply.  Similarly if the RPC returned
753                          * an error, and therefore the bulk will never arrive.
754                          */
755                         if (req->rq_bulk == NULL || req->rq_status != 0) {
756                                 req->rq_phase = RQ_PHASE_INTERPRET;
757                                 GOTO(interpret, req->rq_status);
758                         }
759
760                         req->rq_phase = RQ_PHASE_BULK;
761                 }
762
763                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
764                 if (ptlrpc_bulk_active(req->rq_bulk))
765                         continue;
766
767                 if (!req->rq_bulk->bd_success) {
768                         /* The RPC reply arrived OK, but the bulk screwed
769                          * up!  Dead weird, since the server told us the RPC
770                          * was good after getting the REPLY for its GET or
771                          * the ACK for its PUT. */
772                         DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
773                         LBUG();
774                 }
775
776                 req->rq_phase = RQ_PHASE_INTERPRET;
777
778         interpret:
779                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
780                 LASSERT(!req->rq_receiving_reply);
781
782                 ptlrpc_unregister_reply(req);
783                 if (req->rq_bulk != NULL)
784                         ptlrpc_unregister_bulk (req);
785
786                 req->rq_phase = RQ_PHASE_COMPLETE;
787
788                 if (req->rq_interpret_reply != NULL) {
789                         int (*interpreter)(struct ptlrpc_request *,void *,int) =
790                                 req->rq_interpret_reply;
791                         req->rq_status = interpreter(req, &req->rq_async_args,
792                                                      req->rq_status);
793                 }
794
795                 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
796                        "opc %s:%s:%d:"LPU64":%s:%s:%d\n", current->comm,
797                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
798                        req->rq_xid,
799                        imp->imp_connection->c_peer.peer_ni->pni_name,
800                        ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
801                        req->rq_reqmsg->opc);
802
803                 set->set_remaining--;
804
805                 atomic_dec(&imp->imp_inflight);
806                 wake_up(&imp->imp_recovery_waitq);
807         }
808
809         /* If we hit an error, we want to recover promptly. */
810         RETURN(set->set_remaining == 0 || force_timer_recalc);
811 }
812
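/* Handle a request timeout: mark the request timed out, unregister its reply
 * (and bulk), and either error it out immediately (fake imports, recovery
 * requests, no-recovery obds) or kick import recovery. */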
813 int ptlrpc_expire_one_request(struct ptlrpc_request *req)
814 {
815         unsigned long      flags;
816         struct obd_import *imp = req->rq_import;
817         int replied = 0;
818         ENTRY;
819
820         spin_lock_irqsave (&req->rq_lock, flags);
821         replied = req->rq_replied;
822         if (!replied)
823                 req->rq_timedout = 1;
824         spin_unlock_irqrestore (&req->rq_lock, flags);
825
826         if (replied)
827                 RETURN(0);
828
829         DEBUG_REQ(D_ERROR, req, "timeout (sent at %lu)", (long)req->rq_sent); 
830
831         ptlrpc_unregister_reply (req);
832
833         if (req->rq_bulk != NULL)
834                 ptlrpc_unregister_bulk (req);
835
836         if (imp == NULL) {
837                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
838                 RETURN(1);
839         }
840
841         /* The DLM server doesn't want recovery run on its imports. */
842         if (imp->imp_dlm_fake)
843                 RETURN(1);
844
845         /* If this request is for recovery or other primordial tasks,
846          * then error it out here. */
847         if (req->rq_send_state != LUSTRE_IMP_FULL || 
848             imp->imp_obd->obd_no_recov) {
849                 spin_lock_irqsave (&req->rq_lock, flags);
850                 req->rq_status = -ETIMEDOUT;
851                 req->rq_err = 1;
852                 spin_unlock_irqrestore (&req->rq_lock, flags);
853                 RETURN(1);
854         }
855
856         ptlrpc_fail_import(imp, req->rq_import_generation);
857
858         RETURN(0);
859 }
860
861 int ptlrpc_expired_set(void *data)
862 {
863         struct ptlrpc_request_set *set = data;
864         struct list_head          *tmp;
865         time_t                     now = LTIME_S (CURRENT_TIME);
866         ENTRY;
867
868         LASSERT(set != NULL);
869
870         /* A timeout expired; see which reqs it applies to... */
871         list_for_each (tmp, &set->set_requests) {
872                 struct ptlrpc_request *req =
873                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
874
875                 /* request in-flight? */
876                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting 
877                        && !req->rq_resend) ||
878                       (req->rq_phase == RQ_PHASE_BULK)))
879                         continue;
880
881                 if (req->rq_timedout ||           /* already dealt with */
882                     req->rq_sent + req->rq_timeout > now) /* not expired */
883                         continue;
884
885                 /* deal with this guy */
886                 ptlrpc_expire_one_request (req);
887         }
888
889         /* When waiting for a whole set, we always want to break out of the
890          * sleep so we can recalculate the timeout, or enable interrupts
891          * iff everyone's timed out.
892          */
893         RETURN(1);
894 }
895
896 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
897 {
898         unsigned long flags;
899         spin_lock_irqsave(&req->rq_lock, flags);
900         req->rq_intr = 1;
901         spin_unlock_irqrestore(&req->rq_lock, flags);
902 }
903
904 void ptlrpc_interrupted_set(void *data)
905 {
906         struct ptlrpc_request_set *set = data;
907         struct list_head *tmp;
908
909         LASSERT(set != NULL);
910         CERROR("INTERRUPTED SET %p\n", set);
911
912         list_for_each(tmp, &set->set_requests) {
913                 struct ptlrpc_request *req =
914                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
915
916                 if (req->rq_phase != RQ_PHASE_RPC)
917                         continue;
918
919                 ptlrpc_mark_interrupted(req);
920         }
921 }
922
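/* Return the number of seconds until the next in-flight request in the set
 * is due to expire: 0 if nothing relevant is in flight, 1 if something has
 * already expired. */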
923 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
924 {
925         struct list_head      *tmp;
926         time_t                 now = LTIME_S(CURRENT_TIME);
927         time_t                 deadline;
928         int                    timeout = 0;
929         struct ptlrpc_request *req;
930         ENTRY;
931
932         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
933
934         list_for_each(tmp, &set->set_requests) {
935                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
936
937                 /* request in-flight? */
938                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
939                       (req->rq_phase == RQ_PHASE_BULK)))
940                         continue;
941
942                 if (req->rq_timedout)   /* already timed out */
943                         continue;
944
945                 deadline = req->rq_sent + req->rq_timeout;
946                 if (deadline <= now)    /* actually expired already */
947                         timeout = 1;    /* ASAP */
948                 else if (timeout == 0 || timeout > deadline - now)
949                         timeout = deadline - now;
950         }
951         RETURN(timeout);
952 }
953                 
954
955 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
956 {
957         struct list_head      *tmp;
958         struct ptlrpc_request *req;
959         struct l_wait_info     lwi;
960         int                    rc, timeout;
961         ENTRY;
962
963         LASSERT(!list_empty(&set->set_requests));
964         list_for_each(tmp, &set->set_requests) {
965                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
966                 if (req->rq_phase == RQ_PHASE_NEW)
967                         (void)ptlrpc_send_new_req(req);
968         }
969
970         do {
971                 timeout = ptlrpc_set_next_timeout(set);
972
973                 /* wait until all requests complete, we are interrupted,
974                  * or an in-flight req times out */
975                 CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
976                        set, timeout);
977                 lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
978                                        ptlrpc_expired_set, 
979                                        ptlrpc_interrupted_set, set);
980                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
981
982                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
983
984                 /* -EINTR => all requests have been flagged rq_intr so next
985                  * check completes.
986                  * -ETIMEDOUT => someone timed out.  When all reqs have
987                  * timed out, signals are enabled allowing completion with
988                  * EINTR.
989                  * I don't really care if we go once more round the loop in
990                  * the error cases -eeb. */
991         } while (rc != 0 || set->set_remaining != 0);
992
993         LASSERT(set->set_remaining == 0);
994
995         rc = 0;
996         list_for_each(tmp, &set->set_requests) {
997                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
998
999                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
1000                 if (req->rq_status != 0)
1001                         rc = req->rq_status;
1002         }
1003
1004         if (set->set_interpret != NULL) {
1005                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
1006                         set->set_interpret;
1007                 rc = interpreter (set, &set->set_args, rc);
1008         }
1009
1010         RETURN(rc);
1011 }
1012
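/* Free a request and everything it owns (request/reply buffers, bulk
 * descriptor, import/export references).  @locked indicates whether the
 * caller already holds the import's imp_lock. */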
1013 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
1014 {
1015         ENTRY;
1016         if (request == NULL) {
1017                 EXIT;
1018                 return;
1019         }
1020
1021         LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
1022         LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
1023         LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
1024         LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
1025
1026         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
1027          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
1028         if (request->rq_import != NULL) {
1029                 unsigned long flags = 0;
1030                 if (!locked)
1031                         spin_lock_irqsave(&request->rq_import->imp_lock, flags);
1032                 list_del_init(&request->rq_replay_list);
1033                 if (!locked)
1034                         spin_unlock_irqrestore(&request->rq_import->imp_lock,
1035                                                flags);
1036         }
1037         LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
1038
1039         if (atomic_read(&request->rq_refcount) != 0) {
1040                 DEBUG_REQ(D_ERROR, request,
1041                           "freeing request with nonzero refcount");
1042                 LBUG();
1043         }
1044
1045         if (request->rq_repmsg != NULL) {
1046                 OBD_FREE(request->rq_repmsg, request->rq_replen);
1047                 request->rq_repmsg = NULL;
1048         }
1049         if (request->rq_reqmsg != NULL) {
1050                 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
1051                 request->rq_reqmsg = NULL;
1052         }
1053         if (request->rq_export != NULL) {
1054                 class_export_put(request->rq_export);
1055                 request->rq_export = NULL;
1056         }
1057         if (request->rq_import != NULL) {
1058                 class_import_put(request->rq_import);
1059                 request->rq_import = NULL;
1060         }
1061         if (request->rq_bulk != NULL)
1062                 ptlrpc_free_bulk(request->rq_bulk);
1063
1064         OBD_FREE(request, sizeof(*request));
1065         EXIT;
1066 }
1067
1068 void ptlrpc_free_req(struct ptlrpc_request *request)
1069 {
1070         __ptlrpc_free_req(request, 0);
1071 }
1072
1073 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
1074 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
1075 {
1076         LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
1077         (void)__ptlrpc_req_finished(request, 1);
1078 }
1079
1080 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
1081 {
1082         ENTRY;
1083         if (request == NULL)
1084                 RETURN(1);
1085
1086         if (request == LP_POISON ||
1087             request->rq_reqmsg == LP_POISON) {
1088                 CERROR("dereferencing freed request (bug 575)\n");
1089                 LBUG();
1090                 RETURN(1);
1091         }
1092
1093         DEBUG_REQ(D_INFO, request, "refcount now %u",
1094                   atomic_read(&request->rq_refcount) - 1);
1095
1096         if (atomic_dec_and_test(&request->rq_refcount)) {
1097                 __ptlrpc_free_req(request, locked);
1098                 RETURN(1);
1099         }
1100
1101         RETURN(0);
1102 }
1103
1104 void ptlrpc_req_finished(struct ptlrpc_request *request)
1105 {
1106         __ptlrpc_req_finished(request, 0);
1107 }
1108
1109 /* Disengage the client's reply buffer from the network.
1110  * NB: does _NOT_ unregister any client-side bulk.
1111  * IDEMPOTENT, but _not_ safe against concurrent callers.
1112  * The request owner (i.e. the thread doing the I/O) must call...
1113  */
1114 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
1115 {
1116         int                rc;
1117         wait_queue_head_t *wq;
1118         struct l_wait_info lwi;
1119
1120         LASSERT(!in_interrupt ());             /* might sleep */
1121
1122         if (!ptlrpc_client_receiving_reply(request))
1123                 return;
1124
1125         PtlMDUnlink (request->rq_reply_md_h);
1126
1127         /* We have to l_wait_event() whatever the result, to give liblustre
1128          * a chance to run reply_in_callback() */
1129
1130         if (request->rq_set != NULL)
1131                 wq = &request->rq_set->set_waitq;
1132         else
1133                 wq = &request->rq_reply_waitq;
1134
1135         for (;;) {
1136                 /* Network access will complete in finite time but the HUGE
1137                  * timeout lets us CWARN for visibility of sluggish NALs */
1138                 lwi = LWI_TIMEOUT(300 * HZ, NULL, NULL);
1139                 rc = l_wait_event (*wq, !ptlrpc_client_receiving_reply(request), &lwi);
1140                 if (rc == 0)
1141                         return;
1142
1143                 LASSERT (rc == -ETIMEDOUT);
1144                 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout");
1145         }
1146 }
1147
1148 /* caller must hold imp->imp_lock */
1149 void ptlrpc_free_committed(struct obd_import *imp)
1150 {
1151         struct list_head *tmp, *saved;
1152         struct ptlrpc_request *req;
1153         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1154         ENTRY;
1155
1156         LASSERT(imp != NULL);
1157
1158         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1159
1160         CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
1161                imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1162
1163         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1164                 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1165
1166                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1167                 LASSERT(req != last_req);
1168                 last_req = req;
1169
1170                 if (req->rq_import_generation < imp->imp_generation) {
1171                         DEBUG_REQ(D_HA, req, "freeing request with old gen");
1172                         GOTO(free_req, 0);
1173                 }
1174
1175                 if (req->rq_replay) {
1176                         DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
1177                         continue;
1178                 }
1179
1180                 /* not yet committed */
1181                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1182                         DEBUG_REQ(D_HA, req, "stopping search");
1183                         break;
1184                 }
1185
1186                 DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
1187                           imp->imp_peer_committed_transno);
1188 free_req:
1189                 if (req->rq_commit_cb != NULL)
1190                         req->rq_commit_cb(req);
1191                 list_del_init(&req->rq_replay_list);
1192                 __ptlrpc_req_finished(req, 1);
1193         }
1194
1195         EXIT;
1196         return;
1197 }
1198
1199 void ptlrpc_cleanup_client(struct obd_import *imp)
1200 {
1201         ENTRY;
1202         EXIT;
1203         return;
1204 }
1205
1206 void ptlrpc_resend_req(struct ptlrpc_request *req)
1207 {
1208         unsigned long flags;
1209
1210         DEBUG_REQ(D_HA, req, "going to resend");
1211         req->rq_reqmsg->handle.cookie = 0;
1212         req->rq_status = -EAGAIN;
1213
1214         spin_lock_irqsave (&req->rq_lock, flags);
1215         req->rq_resend = 1;
1216         req->rq_net_err = 0;
1217         req->rq_timedout = 0;
1218         if (req->rq_bulk) {
1219                 __u64 old_xid = req->rq_xid;
1220                 
1221                 /* ensure previous bulk fails */
1222                 req->rq_xid = ptlrpc_next_xid();
1223                 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
1224                        old_xid, req->rq_xid);
1225         }
1226         ptlrpc_wake_client_req(req);
1227         spin_unlock_irqrestore (&req->rq_lock, flags);
1228 }
1229
1230 /* XXX: this function and rq_status are currently unused */
1231 void ptlrpc_restart_req(struct ptlrpc_request *req)
1232 {
1233         unsigned long flags;
1234
1235         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1236         req->rq_status = -ERESTARTSYS;
1237
1238         spin_lock_irqsave (&req->rq_lock, flags);
1239         req->rq_restart = 1;
1240         req->rq_timedout = 0;
1241         ptlrpc_wake_client_req(req);
1242         spin_unlock_irqrestore (&req->rq_lock, flags);
1243 }
1244
1245 static int expired_request(void *data)
1246 {
1247         struct ptlrpc_request *req = data;
1248         ENTRY;
1249
1250         RETURN(ptlrpc_expire_one_request(req));
1251 }
1252
1253 static void interrupted_request(void *data)
1254 {
1255         unsigned long flags;
1256
1257         struct ptlrpc_request *req = data;
1258         DEBUG_REQ(D_HA, req, "request interrupted");
1259         spin_lock_irqsave (&req->rq_lock, flags);
1260         req->rq_intr = 1;
1261         spin_unlock_irqrestore (&req->rq_lock, flags);
1262 }
1263
1264 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1265 {
1266         ENTRY;
1267         atomic_inc(&req->rq_refcount);
1268         RETURN(req);
1269 }
1270
1271 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1272                                       struct obd_import *imp)
1273 {
1274         struct list_head *tmp;
1275
1276         LASSERT_SPIN_LOCKED(&imp->imp_lock);
1277
1278         /* Clear this for new requests that were resent, as well
1279            as for resent replayed requests. */
1280         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1281
1282         /* don't re-add requests that have been replayed */
1283         if (!list_empty(&req->rq_replay_list))
1284                 return;
1285
1286         lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
1287
1288         LASSERT(imp->imp_replayable);
1289         /* Balanced in ptlrpc_free_committed, usually. */
1290         ptlrpc_request_addref(req);
1291         list_for_each_prev(tmp, &imp->imp_replay_list) {
1292                 struct ptlrpc_request *iter =
1293                         list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1294
1295                 /* We may have duplicate transnos if we create and then
1296                  * open a file, or for closes retained to match creating
1297                  * opens, so use req->rq_xid as a secondary key.
1298                  * (See bugs 684, 685, and 428.)
1299                  * XXX no longer needed, but all opens need transnos!
1300                  */
1301                 if (iter->rq_transno > req->rq_transno)
1302                         continue;
1303
1304                 if (iter->rq_transno == req->rq_transno) {
1305                         LASSERT(iter->rq_xid != req->rq_xid);
1306                         if (iter->rq_xid > req->rq_xid)
1307                                 continue;
1308                 }
1309
1310                 list_add(&req->rq_replay_list, &iter->rq_replay_list);
1311                 return;
1312         }
1313
1314         list_add_tail(&req->rq_replay_list, &imp->imp_replay_list);
1315 }
1316
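/* Send @req synchronously: wait for import recovery if needed, transmit,
 * resend on failure unless forbidden, and block until the reply (and any
 * bulk transfer) completes.  May return a positive status from the reply. */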
1317 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1318 {
1319         char str[PTL_NALFMT_SIZE];
1320         int rc = 0;
1321         int brc;
1322         struct l_wait_info lwi;
1323         struct obd_import *imp = req->rq_import;
1324         unsigned long flags;
1325         int timeout = 0;
1326         ENTRY;
1327
1328         LASSERT(req->rq_set == NULL);
1329         LASSERT(!req->rq_receiving_reply);
1330         atomic_inc(&imp->imp_inflight);
1331
1332         if (imp->imp_connection == NULL) {
1333                 CERROR("request on not connected import %s\n",
1334                         imp->imp_obd->obd_name);
1335                 RETURN(-EINVAL);
1336         }
1337
1338         /* for distributed debugging */
1339         req->rq_reqmsg->status = current->pid;
1340         LASSERT(imp->imp_obd != NULL);
1341         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
1342                "%s:%s:%d:"LPU64":%s:%s:%d\n", current->comm,
1343                imp->imp_obd->obd_uuid.uuid,
1344                req->rq_reqmsg->status, req->rq_xid,
1345                imp->imp_connection->c_peer.peer_ni->pni_name,
1346                ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
1347                req->rq_reqmsg->opc);
1348
1349         /* Mark phase here for a little debug help */
1350         req->rq_phase = RQ_PHASE_RPC;
1351
1352         spin_lock_irqsave(&imp->imp_lock, flags);
1353         req->rq_import_generation = imp->imp_generation;
1354 restart:
1355         if (ptlrpc_import_delay_req(imp, req, &rc)) {
1356                 list_del(&req->rq_list);
1357
1358                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1359                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1360
1361                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
1362                           current->comm, 
1363                           ptlrpc_import_state_name(req->rq_send_state), 
1364                           ptlrpc_import_state_name(imp->imp_state));
1365                 lwi = LWI_INTR(interrupted_request, req);
1366                 rc = l_wait_event(req->rq_reply_waitq,
1367                                   (req->rq_send_state == imp->imp_state ||
1368                                    req->rq_err),
1369                                   &lwi);
1370                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d == 1)",
1371                           current->comm, 
1372                           ptlrpc_import_state_name(imp->imp_state), 
1373                           ptlrpc_import_state_name(req->rq_send_state),
1374                           req->rq_err);
1375
1376                 spin_lock_irqsave(&imp->imp_lock, flags);
1377                 list_del_init(&req->rq_list);
1378
1379                 if (req->rq_err) {
1380                         rc = -EIO;
1381                 } 
1382                 else if (req->rq_intr) {
1383                         rc = -EINTR;
1384                 }
1385                 else if (req->rq_no_resend) {
1386                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1387                         GOTO(out, rc = -ETIMEDOUT);
1388                 }
1389                 else {
1390                         GOTO(restart, rc);
1391                 }
1392         } 
1393
1394         if (rc != 0) {
1395                 list_del_init(&req->rq_list);
1396                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1397                 req->rq_status = rc; // XXX this ok?
1398                 GOTO(out, rc);
1399         }
1400
1401         if (req->rq_resend) {
1402                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
1403
1404                 if (req->rq_bulk != NULL)
1405                         ptlrpc_unregister_bulk (req);
1406
1407                 DEBUG_REQ(D_HA, req, "resending: ");
1408         }
1409
1410         /* XXX this is the same as ptlrpc_set_wait */
1411         LASSERT(list_empty(&req->rq_list));
1412         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1413         spin_unlock_irqrestore(&imp->imp_lock, flags);
1414
1415         rc = ptl_send_rpc(req);
1416         if (rc) {
1417                 DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
1418                 timeout = 1;
1419         } else {
1420                 timeout = MAX(req->rq_timeout * HZ, 1);
1421                 DEBUG_REQ(D_NET, req, "-- sleeping for %d jiffies", timeout);
1422         }
1423         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
1424                                req);
1425         l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req), &lwi);
1426         DEBUG_REQ(D_NET, req, "-- done sleeping");
1427
1428         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
1429                "%s:%s:%d:"LPU64":%s:%s:%d\n", current->comm,
1430                imp->imp_obd->obd_uuid.uuid,
1431                req->rq_reqmsg->status, req->rq_xid,
1432                imp->imp_connection->c_peer.peer_ni->pni_name,
1433                ptlrpc_peernid2str(&imp->imp_connection->c_peer, str),
1434                req->rq_reqmsg->opc);
1435
1436         spin_lock_irqsave(&imp->imp_lock, flags);
1437         list_del_init(&req->rq_list);
1438         spin_unlock_irqrestore(&imp->imp_lock, flags);
1439
1440         /* If the reply was received normally, this just grabs the spinlock
1441          * (ensuring the reply callback has returned), sees that
1442          * req->rq_receiving_reply is clear and returns. */
1443         ptlrpc_unregister_reply (req);
1444
1445         if (req->rq_err)
1446                 GOTO(out, rc = -EIO);
1447
1448         /* Resend if we need to, unless we were interrupted. */
1449         if (req->rq_resend && !req->rq_intr) {
1450                 /* ...unless we were specifically told otherwise. */
1451                 if (req->rq_no_resend)
1452                         GOTO(out, rc = -ETIMEDOUT);
1453                 spin_lock_irqsave(&imp->imp_lock, flags);
1454                 goto restart;
1455         }
1456
1457         if (req->rq_intr) {
1458                 /* Should only be interrupted if we timed out. */
1459                 if (!req->rq_timedout)
1460                         DEBUG_REQ(D_ERROR, req,
1461                                   "rq_intr set but rq_timedout not");
1462                 GOTO(out, rc = -EINTR);
1463         }
1464
1465         if (req->rq_timedout) {                 /* non-recoverable timeout */
1466                 GOTO(out, rc = -ETIMEDOUT);
1467         }
1468
1469         if (!req->rq_replied) {
1470                 /* How can this be? -eeb */
1471                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
1472                 LBUG();
1473                 GOTO(out, rc = req->rq_status);
1474         }
1475
1476         rc = after_reply (req);
1477         /* NB may return +ve success rc */
1478         if (req->rq_resend) {
1479                 spin_lock_irqsave(&imp->imp_lock, flags);
1480                 goto restart;
1481         }
1482
1483  out:
1484         if (req->rq_bulk != NULL) {
1485                 if (rc >= 0) {                  
1486                         /* success so far.  Note that anything going wrong
1487                          * with bulk now is EXTREMELY strange, since the
1488                          * server must have believed that the bulk
1489                          * transferred OK before she replied with success to
1490                          * me. */
1491                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
1492                         brc = l_wait_event(req->rq_reply_waitq,
1493                                            !ptlrpc_bulk_active(req->rq_bulk),
1494                                            &lwi);
1495                         LASSERT(brc == 0 || brc == -ETIMEDOUT);
1496                         if (brc != 0) {
1497                                 LASSERT(brc == -ETIMEDOUT);
1498                                 DEBUG_REQ(D_ERROR, req, "bulk timed out");
1499                                 rc = brc;
1500                         } else if (!req->rq_bulk->bd_success) {
1501                                 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1502                                 rc = -EIO;
1503                         }
1504                 }
1505                 if (rc < 0)
1506                         ptlrpc_unregister_bulk (req);
1507         }
1508
1509         LASSERT(!req->rq_receiving_reply);
1510         req->rq_phase = RQ_PHASE_INTERPRET;
1511
1512         atomic_dec(&imp->imp_inflight);
1513         wake_up(&imp->imp_recovery_waitq);
1514         RETURN(rc);
1515 }
1516
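/* Per-request state stashed across an asynchronous replay: the original send
 * state and reply status are saved in rq_async_args by ptlrpc_replay_req()
 * and restored by ptlrpc_replay_interpret() once the replayed reply has been
 * handled. */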
1517 struct ptlrpc_replay_async_args {
1518         int praa_old_state;
1519         int praa_old_status;
1520 };
1521
1522 static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
1523                                     void * data, int rc)
1524 {
1525         struct ptlrpc_replay_async_args *aa = data;
1526         struct obd_import *imp = req->rq_import;
1527         unsigned long flags;
1528
1529         atomic_dec(&imp->imp_replay_inflight);
1530         
1531         if (!req->rq_replied) {
1532                 CERROR("request replay timed out, restarting recovery\n");
1533                 GOTO(out, rc = -ETIMEDOUT);
1534         }
1535
1536 #if SWAB_PARANOIA
1537         /* Clear reply swab mask; this is a new reply in sender's byte order */
1538         req->rq_rep_swab_mask = 0;
1539 #endif
1540         LASSERT (req->rq_nob_received <= req->rq_replen);
1541         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
1542         if (rc) {
1543                 CERROR("unpack_rep failed: %d\n", rc);
1544                 GOTO(out, rc = -EPROTO);
1545         }
1546
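        /* An error reply carrying -ENOTCONN means the server no longer
         * recognises this connection; give up on this replay and let the
         * failure path below restart recovery. */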
1547         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR && 
1548             req->rq_repmsg->status == -ENOTCONN) 
1549                 GOTO(out, rc = req->rq_repmsg->status);
1550
1551         /* The transno had better not change over replay. */
1552         LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);
1553
1554         DEBUG_REQ(D_HA, req, "got rep");
1555
1556         /* let the callback do fixups, possibly including changes to the request itself */
1557         if (req->rq_replay_cb)
1558                 req->rq_replay_cb(req);
1559
1560         if (req->rq_replied && req->rq_repmsg->status != aa->praa_old_status) {
1561                 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
1562                           req->rq_repmsg->status, aa->praa_old_status);
1563         } else {
1564                 /* Put it back for re-replay. */
1565                 req->rq_repmsg->status = aa->praa_old_status;
1566         }
1567
1568         spin_lock_irqsave(&imp->imp_lock, flags);
1569         imp->imp_last_replay_transno = req->rq_transno;
1570         spin_unlock_irqrestore(&imp->imp_lock, flags);
1571
1572         /* continue with recovery */
1573         rc = ptlrpc_import_recovery_state_machine(imp);
1574  out:
1575         req->rq_send_state = aa->praa_old_state;
1576         
1577         if (rc != 0)
1578                 /* this replay failed, so restart recovery */
1579                 ptlrpc_connect_import(imp, NULL);
1580
1581         RETURN(rc);
1582 }
1583
1584
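/* Hand a request to ptlrpcd for replay.  The request is reset to the NEW
 * phase and marked to be sent in the REPLAY import state; the saved state is
 * restored by ptlrpc_replay_interpret() above when the reply (or an error)
 * arrives. */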
1585 int ptlrpc_replay_req(struct ptlrpc_request *req)
1586 {
1587         struct ptlrpc_replay_async_args *aa;
1588         ENTRY;
1589
1590         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
1591
1592         /* Not handling automatic bulk replay yet (or ever?) */
1593         LASSERT(req->rq_bulk == NULL);
1594
1595         DEBUG_REQ(D_HA, req, "REPLAY");
1596
1597         LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
1598         aa = (struct ptlrpc_replay_async_args *)&req->rq_async_args;
1599         memset(aa, 0, sizeof *aa);
1600
1601         /* Prepare request to be resent with ptlrpcd */
1602         aa->praa_old_state = req->rq_send_state;
1603         req->rq_send_state = LUSTRE_IMP_REPLAY;
1604         req->rq_phase = RQ_PHASE_NEW;
1605         aa->praa_old_status = req->rq_repmsg->status;
1606         req->rq_status = 0;
1607
1608         req->rq_interpret_reply = ptlrpc_replay_interpret;
1609         atomic_inc(&req->rq_import->imp_replay_inflight);
1610         ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
1611
1612         ptlrpcd_add_req(req);
1613         RETURN(0);
1614 }
1615
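/* Abort every request from an older import generation that is still on the
 * import's sending or delayed lists: set rq_err and wake the waiter so it
 * sees the error. */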
1616 void ptlrpc_abort_inflight(struct obd_import *imp)
1617 {
1618         unsigned long flags;
1619         struct list_head *tmp, *n;
1620         ENTRY;
1621
1622         /* Make sure that no new requests get processed for this import.
1623          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
1624          * this flag and then putting requests on sending_list or delayed_list.
1625          */
1626         spin_lock_irqsave(&imp->imp_lock, flags);
1627
1628         /* XXX locking?  Maybe we should remove each request with the list
1629          * locked?  Also, how do we know if the requests on the list are
1630          * being freed at this time?
1631          */
1632         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
1633                 struct ptlrpc_request *req =
1634                         list_entry(tmp, struct ptlrpc_request, rq_list);
1635
1636                 DEBUG_REQ(D_HA, req, "inflight");
1637
1638                 spin_lock (&req->rq_lock);
1639                 if (req->rq_import_generation < imp->imp_generation) {
1640                         req->rq_err = 1;
1641                         ptlrpc_wake_client_req(req);
1642                 }
1643                 spin_unlock (&req->rq_lock);
1644         }
1645
1646         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
1647                 struct ptlrpc_request *req =
1648                         list_entry(tmp, struct ptlrpc_request, rq_list);
1649
1650                 DEBUG_REQ(D_HA, req, "aborting waiting req");
1651
1652                 spin_lock (&req->rq_lock);
1653                 if (req->rq_import_generation < imp->imp_generation) {
1654                         req->rq_err = 1;
1655                         ptlrpc_wake_client_req(req);
1656                 }
1657                 spin_unlock (&req->rq_lock);
1658         }
1659
1660         /* Last chance to free reqs left on the replay list, but we
1661          * will still leak reqs that haven't committed.  */
1662         if (imp->imp_replayable)
1663                 ptlrpc_free_committed(imp);
1664
1665         spin_unlock_irqrestore(&imp->imp_lock, flags);
1666
1667         EXIT;
1668 }
1669
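/* XID generation: a single locked counter hands out unique, monotonically
 * increasing transfer ids for new requests. */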
1670 static __u64 ptlrpc_last_xid = 0;
1671 static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;
1672
1673 __u64 ptlrpc_next_xid(void)
1674 {
1675         __u64 tmp;
1676         spin_lock(&ptlrpc_last_xid_lock);
1677         tmp = ++ptlrpc_last_xid;
1678         spin_unlock(&ptlrpc_last_xid_lock);
1679         return tmp;
1680 }
1681
1682