lustre/ptlrpc/client.c (fs/lustre-release.git)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <errno.h>
26 #include <signal.h>
27 #include <liblustre.h>
28 #endif
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/lustre_lib.h>
33 #include <linux/lustre_ha.h>
34 #include <linux/lustre_import.h>
35
36 #include "ptlrpc_internal.h"
37
38 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
39                         struct ptlrpc_client *cl)
40 {
41         cl->cli_request_portal = req_portal;
42         cl->cli_reply_portal   = rep_portal;
43         cl->cli_name           = name;
44 }
45
46 struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
47 {
48         return &req->rq_connection->c_remote_uuid;
49 }
50
51 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
52 {
53         struct ptlrpc_connection *c;
54         struct ptlrpc_peer peer;
55         int err;
56
57         err = ptlrpc_uuid_to_peer(uuid, &peer);
58         if (err != 0) {
59                 CERROR("cannot find peer %s!\n", uuid->uuid);
60                 return NULL;
61         }
62
63         c = ptlrpc_get_connection(&peer, uuid);
64         if (c) {
65                 memcpy(c->c_remote_uuid.uuid,
66                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
67                 c->c_epoch++;
68         }
69
70         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
71
72         return c;
73 }
74
75 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
76                                  struct obd_uuid *uuid)
77 {
78         struct ptlrpc_peer peer;
79         int err;
80
81         err = ptlrpc_uuid_to_peer(uuid, &peer);
82         if (err != 0) {
83                 CERROR("cannot find peer %s!\n", uuid->uuid);
84                 return;
85         }
86
87         memcpy(&conn->c_peer, &peer, sizeof (peer));
88         return;
89 }
90
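/* Allocate a bulk descriptor and initialize its spinlock, waitqueue, page
 * list and (not yet attached) MD/ME handles. */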
91 static inline struct ptlrpc_bulk_desc *new_bulk(void)
92 {
93         struct ptlrpc_bulk_desc *desc;
94
95         OBD_ALLOC(desc, sizeof(*desc));
96         if (!desc)
97                 return NULL;
98
99         spin_lock_init(&desc->bd_lock);
100         init_waitqueue_head(&desc->bd_waitq);
101         INIT_LIST_HEAD(&desc->bd_page_list);
102         desc->bd_md_h = PTL_HANDLE_NONE;
103         desc->bd_me_h = PTL_HANDLE_NONE;
104
105         return desc;
106 }
107
108 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
109                                                int type, int portal)
110 {
111         struct obd_import *imp = req->rq_import;
112         struct ptlrpc_bulk_desc *desc;
113
114         LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
115
116         desc = new_bulk();
117         if (desc == NULL)
118                 RETURN(NULL);
119
120         desc->bd_import_generation = req->rq_import_generation;
121         desc->bd_import = class_import_get(imp);
122         desc->bd_req = req;
123         desc->bd_type = type;
124         desc->bd_portal = portal;
125
126         /* This makes the req own the desc, which is freed when the req itself is freed */
127         req->rq_bulk = desc;
128
129         return desc;
130 }
131
132 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
133                                                int type, int portal)
134 {
135         struct obd_export *exp = req->rq_export;
136         struct ptlrpc_bulk_desc *desc;
137
138         LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
139
140         desc = new_bulk();
141         if (desc == NULL)
142                 RETURN(NULL);
143
144         desc->bd_export = class_export_get(exp);
145         desc->bd_req = req;
146         desc->bd_type = type;
147         desc->bd_portal = portal;
148
149         /* NB we don't assign rq_bulk here; server-side requests are
150          * re-used, and the handler frees the bulk desc explicitly. */
151
152         return desc;
153 }
154
155 int ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
156                           struct page *page, int pageoffset, int len)
157 {
158         struct ptlrpc_bulk_page *bulk;
159
160         OBD_ALLOC(bulk, sizeof(*bulk));
161         if (bulk == NULL)
162                 return -ENOMEM;
163
164         LASSERT(page != NULL);
165         LASSERT(pageoffset >= 0);
166         LASSERT(len > 0);
167         LASSERT(pageoffset + len <= PAGE_SIZE);
168
169         bulk->bp_page = page;
170         bulk->bp_pageoffset = pageoffset;
171         bulk->bp_buflen = len;
172
173         bulk->bp_desc = desc;
174         list_add_tail(&bulk->bp_link, &desc->bd_page_list);
175         desc->bd_page_count++;
176         return 0;
177 }
178
179 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
180 {
181         struct list_head *tmp, *next;
182         ENTRY;
183
184         LASSERT(desc != NULL);
185         LASSERT(desc->bd_page_count != 0x5a5a5a5a); /* not freed already */
186         LASSERT(!desc->bd_network_rw);         /* network hands off or */
187
188         list_for_each_safe(tmp, next, &desc->bd_page_list) {
189                 struct ptlrpc_bulk_page *bulk;
190                 bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
191                 ptlrpc_free_bulk_page(bulk);
192         }
193
194         LASSERT(desc->bd_page_count == 0);
195         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
196
197         if (desc->bd_export)
198                 class_export_put(desc->bd_export);
199         else
200                 class_import_put(desc->bd_import);
201
202         OBD_FREE(desc, sizeof(*desc));
203         EXIT;
204 }
205
206 void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
207 {
208         LASSERT(bulk != NULL);
209
210         list_del(&bulk->bp_link);
211         bulk->bp_desc->bd_page_count--;
212         OBD_FREE(bulk, sizeof(*bulk));
213 }
214
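/* Allocate and initialize a request on import 'imp': pack the request
 * message from 'lengths'/'bufs', set the default timeout, level and portals,
 * take references on the import and its connection, and assign a fresh XID. */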
215 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
216                                        int count, int *lengths, char **bufs)
217 {
218         struct ptlrpc_request *request;
219         int rc;
220         ENTRY;
221
222         LASSERT((unsigned long)imp > 0x1000);
223
224         OBD_ALLOC(request, sizeof(*request));
225         if (!request) {
226                 CERROR("request allocation out of memory\n");
227                 RETURN(NULL);
228         }
229
230         rc = lustre_pack_msg(count, lengths, bufs,
231                              &request->rq_reqlen, &request->rq_reqmsg);
232         if (rc) {
233                 CERROR("cannot pack request %d\n", rc);
234                 OBD_FREE(request, sizeof(*request));
235                 RETURN(NULL);
236         }
237
238         request->rq_timeout = obd_timeout;
239         request->rq_level = LUSTRE_CONN_FULL;
240         request->rq_type = PTL_RPC_MSG_REQUEST;
241         request->rq_import = class_import_get(imp);
242         request->rq_phase = RQ_PHASE_NEW;
243
244         /* XXX FIXME bug 249 */
245         request->rq_request_portal = imp->imp_client->cli_request_portal;
246         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
247
248         request->rq_connection = ptlrpc_connection_addref(imp->imp_connection);
249
250         spin_lock_init(&request->rq_lock);
251         INIT_LIST_HEAD(&request->rq_list);
252         init_waitqueue_head(&request->rq_wait_for_rep);
253         request->rq_xid = ptlrpc_next_xid();
254         atomic_set(&request->rq_refcount, 1);
255
256         request->rq_reqmsg->opc = opcode;
257         request->rq_reqmsg->flags = 0;
258
259         RETURN(request);
260 }
261
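/* Allocate an empty request set; requests are added with ptlrpc_set_add_req()
 * and the set is driven to completion by ptlrpc_check_set()/ptlrpc_set_wait(). */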
262 struct ptlrpc_request_set *ptlrpc_prep_set(void)
263 {
264         struct ptlrpc_request_set *set;
265
266         OBD_ALLOC(set, sizeof *set);
267         if (!set)
268                 RETURN(NULL);
269         INIT_LIST_HEAD(&set->set_requests);
270         init_waitqueue_head(&set->set_waitq);
271         set->set_remaining = 0;
272
273         RETURN(set);
274 }
275
276 /* Finish with this set; opposite of prep_set. */
277 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
278 {
279         struct list_head *tmp;
280         struct list_head *next;
281         int               expected_phase;
282         int               n = 0;
283         ENTRY;
284
285         /* Requests on the set should either all be completed, or all be new */
286         expected_phase = (set->set_remaining == 0) ?
287                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
288         list_for_each (tmp, &set->set_requests) {
289                 struct ptlrpc_request *req =
290                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
291
292                 LASSERT(req->rq_phase == expected_phase);
293                 n++;
294         }
295
296         LASSERT(set->set_remaining == 0 || set->set_remaining == n);
297
298         list_for_each_safe(tmp, next, &set->set_requests) {
299                 struct ptlrpc_request *req =
300                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
301                 list_del_init(&req->rq_set_chain);
302
303                 LASSERT(req->rq_phase == expected_phase);
304
305                 if (req->rq_phase == RQ_PHASE_NEW) {
306
307                         if (req->rq_interpret_reply != NULL) {
308                                 int (*interpreter)(struct ptlrpc_request *,
309                                                    void *, int) =
310                                         req->rq_interpret_reply;
311
312                                 /* higher level (i.e. LOV) failed;
313                                  * let the sub reqs clean up */
314                                 req->rq_status = -EBADR;
315                                 interpreter(req, &req->rq_async_args,
316                                             req->rq_status);
317                         }
318                         set->set_remaining--;
319                 }
320
321                 req->rq_set = NULL;
322                 ptlrpc_req_finished (req);
323         }
324
325         LASSERT(set->set_remaining == 0);
326
327         OBD_FREE(set, sizeof(*set));
328         EXIT;
329 }
330
331 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
332                         struct ptlrpc_request *req)
333 {
334         /* The set takes over the caller's request reference */
335         list_add_tail(&req->rq_set_chain, &set->set_requests);
336         req->rq_set = set;
337         set->set_remaining++;
338 }
339
340 static int ptlrpc_check_reply(struct ptlrpc_request *req)
341 {
342         unsigned long flags;
343         int rc = 0;
344         ENTRY;
345
346         /* serialise with network callback */
347         spin_lock_irqsave (&req->rq_lock, flags);
348
349         if (req->rq_replied) {
350                 DEBUG_REQ(D_NET, req, "REPLIED:");
351                 GOTO(out, rc = 1);
352         }
353
354         if (req->rq_err) {
355                 DEBUG_REQ(D_ERROR, req, "ABORTED:");
356                 GOTO(out, rc = 1);
357         }
358
359         if (req->rq_resend) {
360                 DEBUG_REQ(D_ERROR, req, "RESEND:");
361                 GOTO(out, rc = 1);
362         }
363
364         if (req->rq_restart) {
365                 DEBUG_REQ(D_ERROR, req, "RESTART:");
366                 GOTO(out, rc = 1);
367         }
368         EXIT;
369  out:
370         spin_unlock_irqrestore (&req->rq_lock, flags);
371         DEBUG_REQ(D_NET, req, "rc = %d for", rc);
372         return rc;
373 }
374
375 static int ptlrpc_check_status(struct ptlrpc_request *req)
376 {
377         int err;
378         ENTRY;
379
380         err = req->rq_repmsg->status;
381         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
382                 DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
383                 if (err >= 0)
384                         CERROR("Error Reply has >= zero status\n");
385                 RETURN(err < 0 ? err : -EINVAL);
386         }
387
388         if (err < 0) {
389                 DEBUG_REQ(D_INFO, req, "status is %d", err);
390         } else if (err > 0) {
391                 /* XXX: translate this error from net to host */
392                 DEBUG_REQ(D_INFO, req, "status is %d", err);
393         }
394
395         RETURN(err);
396 }
397
398 #warning this needs to change after robert fixes eviction handling
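/* Process a freshly received reply: unpack and sanity-check it, record the
 * transno for replay, and on -ENOTCONN run eviction handling, asking the
 * caller (via *restartp) to resend the request when that is required. */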
399 static int after_reply(struct ptlrpc_request *req, int *restartp)
400 {
401         unsigned long flags;
402         struct obd_import *imp = req->rq_import;
403         int rc;
404         ENTRY;
405
406         LASSERT(!req->rq_receiving_reply);
407         LASSERT(req->rq_replied);
408
409         if (restartp != NULL)
410                 *restartp = 0;
411
412         /* NB Until this point, the whole of the incoming message,
413          * including buflens, status etc is in the sender's byte order. */
414
415 #if SWAB_PARANOIA
416         /* Clear reply swab mask; this is a new reply in sender's byte order */
417         req->rq_rep_swab_mask = 0;
418 #endif
419         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
420         if (rc) {
421                 CERROR("unpack_rep failed: %d\n", rc);
422                 RETURN(-EPROTO);
423         }
424
425         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
426             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
427                 CERROR("invalid packet type received (type=%u)\n",
428                        req->rq_repmsg->type);
429                 RETURN(-EPROTO);
430         }
431
432         /* Store transno in reqmsg for replay. */
433         req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;
434
435         rc = ptlrpc_check_status(req);
436
437         /* Either we've been evicted, or the server has failed for
438          * some reason. Try to reconnect, and if that fails, punt to the
439          * upcall. */
440         if (rc == -ENOTCONN) {
441                 if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
442                     imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
443                         RETURN(-ENOTCONN);
444                 }
445
446                 ptlrpc_request_handle_eviction(req);
447
448                 if (req->rq_err)
449                         RETURN(-EIO);
450
451                 if (req->rq_no_resend)
452                         RETURN(rc); /* -ENOTCONN */
453
454                 if (req->rq_resend) {
455                         if (restartp == NULL)
456                                 LBUG(); /* async resend not supported yet */
457                         spin_lock_irqsave (&req->rq_lock, flags);
458                         req->rq_resend = 0;
459                         spin_unlock_irqrestore (&req->rq_lock, flags);
460                         *restartp = 1;
461                         lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
462                         DEBUG_REQ(D_HA, req, "resending: ");
463                         RETURN(0);
464                 }
465
466                 CERROR("request should be err or resend: %p\n", req);
467                 LBUG();
468         }
469
470         if (req->rq_import->imp_replayable) {
471                 spin_lock_irqsave(&imp->imp_lock, flags);
472                 if ((req->rq_replay || req->rq_transno != 0) && rc >= 0)
473                         ptlrpc_retain_replayable_request(req, imp);
474
475                 if (req->rq_transno > imp->imp_max_transno)
476                         imp->imp_max_transno = req->rq_transno;
477
478                 /* Replay-enabled imports return commit-status information. */
479                 if (req->rq_repmsg->last_committed)
480                         imp->imp_peer_committed_transno =
481                                 req->rq_repmsg->last_committed;
482                 ptlrpc_free_committed(imp);
483                 spin_unlock_irqrestore(&imp->imp_lock, flags);
484         }
485
486         RETURN(rc);
487 }
488
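/* Walk every request in the set and advance it through its phases
 * (RPC -> BULK -> INTERPRET -> COMPLETE), resending or failing it as needed.
 * Returns 1 once no incomplete requests remain. */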
489 int ptlrpc_check_set(struct ptlrpc_request_set *set)
490 {
491         unsigned long flags;
492         struct list_head *tmp;
493         ENTRY;
494
495         if (set->set_remaining == 0)
496                 RETURN(1);
497
498         list_for_each(tmp, &set->set_requests) {
499                 struct ptlrpc_request *req =
500                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
501                 struct obd_import *imp = req->rq_import;
502                 int rc = 0;
503
504                 if (!(req->rq_phase == RQ_PHASE_RPC ||
505                       req->rq_phase == RQ_PHASE_BULK ||
506                       req->rq_phase == RQ_PHASE_INTERPRET ||
507                       req->rq_phase == RQ_PHASE_COMPLETE)) {
508                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
509                         LBUG();
510                 }
511
512                 if (req->rq_phase == RQ_PHASE_COMPLETE)
513                         continue;
514
515                 if (req->rq_phase == RQ_PHASE_INTERPRET)
516                         GOTO(interpret, req->rq_status);
517
518                 if (req->rq_err) {
519                         ptlrpc_unregister_reply(req);
520                         if (req->rq_status == 0)
521                                 req->rq_status = -EIO;
522                         req->rq_phase = RQ_PHASE_INTERPRET;
523
524                         spin_lock_irqsave(&imp->imp_lock, flags);
525                         list_del_init(&req->rq_list);
526                         spin_unlock_irqrestore(&imp->imp_lock, flags);
527
528                         GOTO(interpret, req->rq_status);
529                 }
530
531                 if (req->rq_intr) {
532                         /* NB could be on delayed list */
533                         ptlrpc_unregister_reply(req);
534                         req->rq_status = -EINTR;
535                         req->rq_phase = RQ_PHASE_INTERPRET;
536
537                         spin_lock_irqsave(&imp->imp_lock, flags);
538                         list_del_init(&req->rq_list);
539                         spin_unlock_irqrestore(&imp->imp_lock, flags);
540
541                         GOTO(interpret, req->rq_status);
542                 }
543
544                 if (req->rq_phase == RQ_PHASE_RPC) {
545                         int do_restart = 0;
546                         if (req->rq_waiting || req->rq_resend) {
547                                 spin_lock_irqsave(&imp->imp_lock, flags);
548
549                                 if (req->rq_level > imp->imp_level) {
550                                         spin_unlock_irqrestore(&imp->imp_lock,
551                                                                flags);
552                                         continue;
553                                 }
554
555                                 list_del(&req->rq_list);
556                                 list_add_tail(&req->rq_list,
557                                               &imp->imp_sending_list);
558
559                                 if (req->rq_import_generation <
560                                     imp->imp_generation) {
561                                         req->rq_status = -EIO;
562                                         req->rq_phase = RQ_PHASE_INTERPRET;
563                                         spin_unlock_irqrestore(&imp->imp_lock,
564                                                                flags);
565                                         GOTO(interpret, req->rq_status);
566                                 }
567                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
568
569                                 req->rq_waiting = 0;
570                                 if (req->rq_resend) {
571                                         lustre_msg_add_flags(req->rq_reqmsg,
572                                                              MSG_RESENT);
573                                         spin_lock_irqsave(&req->rq_lock, flags);
574                                         req->rq_resend = 0;
575                                         spin_unlock_irqrestore(&req->rq_lock,
576                                                                flags);
577
578                                         ptlrpc_unregister_reply(req);
579                                         if (req->rq_bulk)
580                                                 ptlrpc_unregister_bulk(req);
581                                 }
582
583                                 rc = ptl_send_rpc(req);
584                                 if (rc) {
585                                         req->rq_status = rc;
586                                         req->rq_phase = RQ_PHASE_INTERPRET;
587                                         GOTO(interpret, req->rq_status);
588                                 }
589
590                         }
591
592                         /* Ensure the network callback returned */
593                         spin_lock_irqsave (&req->rq_lock, flags);
594                         if (!req->rq_replied) {
595                                 spin_unlock_irqrestore (&req->rq_lock, flags);
596                                 continue;
597                         }
598                         spin_unlock_irqrestore (&req->rq_lock, flags);
599
600                         spin_lock_irqsave(&imp->imp_lock, flags);
601                         list_del_init(&req->rq_list);
602                         spin_unlock_irqrestore(&imp->imp_lock, flags);
603
604                         req->rq_status = after_reply(req, &do_restart);
605                         if (do_restart) {
606                                 spin_lock_irqsave (&req->rq_lock, flags);
607                                 req->rq_resend = 1; /* ugh */
608                                 spin_unlock_irqrestore (&req->rq_lock, flags);
609                                 continue;
610                         }
611
612                         /* If there is no bulk associated with this request,
613                          * then we're done and should let the interpreter
614                          * process the reply.  Similarly if the RPC returned
615                          * an error, and therefore the bulk will never arrive.
616                          */
617                         if (req->rq_bulk == NULL || req->rq_status != 0) {
618                                 req->rq_phase = RQ_PHASE_INTERPRET;
619                                 GOTO(interpret, req->rq_status);
620                         }
621
622                         req->rq_phase = RQ_PHASE_BULK;
623                 }
624
625                 LASSERT(req->rq_phase == RQ_PHASE_BULK);
626                 if (!ptlrpc_bulk_complete (req->rq_bulk))
627                         continue;
628
629                 req->rq_phase = RQ_PHASE_INTERPRET;
630
631         interpret:
632                 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
633                 LASSERT(!req->rq_receiving_reply);
634
635                 ptlrpc_unregister_reply(req);
636                 if (req->rq_bulk != NULL)
637                         ptlrpc_unregister_bulk (req);
638
639                 if (req->rq_interpret_reply != NULL) {
640                         int (*interpreter)(struct ptlrpc_request *,void *,int) =
641                                 req->rq_interpret_reply;
642                         req->rq_status = interpreter(req, &req->rq_async_args,
643                                                      req->rq_status);
644                 }
645
646                 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
647                        "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
648                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
649                        req->rq_xid,
650                        imp->imp_connection->c_peer.peer_ni->pni_name,
651                        imp->imp_connection->c_peer.peer_nid,
652                        req->rq_reqmsg->opc);
653
654                 req->rq_phase = RQ_PHASE_COMPLETE;
655                 set->set_remaining--;
656         }
657
658         RETURN(set->set_remaining == 0);
659 }
660
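/* Handle a timeout on a single request: mark it timed out, unregister its
 * reply buffer, and start recovery on the import unless the request is
 * itself part of recovery or the import is a fake DLM import. */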
661 int ptlrpc_expire_one_request(struct ptlrpc_request *req)
662 {
663         unsigned long      flags;
664         struct obd_import *imp = req->rq_import;
665         ENTRY;
666
667         DEBUG_REQ(D_ERROR, req, "timeout");
668
669         spin_lock_irqsave (&req->rq_lock, flags);
670         req->rq_timedout = 1;
671         spin_unlock_irqrestore (&req->rq_lock, flags);
672
673         ptlrpc_unregister_reply (req);
674
675         if (imp == NULL) {
676                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
677                 RETURN(1);
678         }
679
680         /* The DLM server doesn't want recovery run on its imports. */
681         if (imp->imp_dlm_fake)
682                 RETURN(1);
683
684         /* If this request is for recovery or other primordial tasks,
685          * don't go back to sleep, and don't start recovery again. */
686         if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
687             imp->imp_obd->obd_no_recov)
688                 RETURN(1);
689
690         ptlrpc_fail_import(imp, req->rq_import_generation);
691
692         RETURN(0);
693 }
694
695 static int expired_set(void *data)
696 {
697         struct ptlrpc_request_set *set = data;
698         struct list_head          *tmp;
699         time_t                     now = LTIME_S (CURRENT_TIME);
700         ENTRY;
701
702         LASSERT(set != NULL);
703
704         /* A timeout expired; see which reqs it applies to... */
705         list_for_each (tmp, &set->set_requests) {
706                 struct ptlrpc_request *req =
707                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
708
709                 /* request in-flight? */
710                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
711                       (req->rq_phase == RQ_PHASE_BULK)))
712                         continue;
713
714                 if (req->rq_timedout ||           /* already dealt with */
715                     req->rq_sent + req->rq_timeout > now) /* not expired */
716                         continue;
717
718                 /* deal with this guy */
719                 ptlrpc_expire_one_request (req);
720         }
721
722         /* When waiting for a whole set, we always break out of the
723          * sleep so we can recalculate the timeout, or enable interrupts
724          * iff everyone's timed out.
725          */
726         RETURN(1);
727 }
728
729 static void interrupted_set(void *data)
730 {
731         struct ptlrpc_request_set *set = data;
732         struct list_head *tmp;
733         unsigned long flags;
734
735         LASSERT(set != NULL);
736         CERROR("INTERRUPTED SET %p\n", set);
737
738         list_for_each(tmp, &set->set_requests) {
739                 struct ptlrpc_request *req =
740                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
741
742                 if (req->rq_phase != RQ_PHASE_RPC)
743                         continue;
744
745                 spin_lock_irqsave (&req->rq_lock, flags);
746                 req->rq_intr = 1;
747                 spin_unlock_irqrestore (&req->rq_lock, flags);
748         }
749 }
750
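/* Send every new request in the set and block until all of them complete,
 * resending or expiring requests along the way.  The result is non-zero if
 * any request failed, and is passed through the set's interpret callback
 * when one is registered. */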
751 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
752 {
753         struct list_head      *tmp;
754         struct obd_import     *imp;
755         struct ptlrpc_request *req;
756         struct l_wait_info     lwi;
757         unsigned long          flags;
758         int                    rc;
759         time_t                 now;
760         time_t                 deadline;
761         int                    timeout;
762         ENTRY;
763
764         SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
765         LASSERT(!list_empty(&set->set_requests));
766         list_for_each(tmp, &set->set_requests) {
767                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
768
769                 LASSERT(req->rq_level == LUSTRE_CONN_FULL);
770                 LASSERT(req->rq_phase == RQ_PHASE_NEW);
771                 req->rq_phase = RQ_PHASE_RPC;
772
773                 imp = req->rq_import;
774                 spin_lock_irqsave(&imp->imp_lock, flags);
775
776                 if (imp->imp_invalid) {
777                         spin_unlock_irqrestore(&imp->imp_lock, flags);
778                         req->rq_status = -EIO;
779                         req->rq_phase = RQ_PHASE_INTERPRET;
780                         continue;
781                 }
782
783                 req->rq_import_generation = imp->imp_generation;
784
785                 if (req->rq_level > imp->imp_level) {
786                         if (req->rq_no_recov || imp->imp_obd->obd_no_recov ||
787                             imp->imp_dlm_fake) {
788                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
789                                 req->rq_status = -EWOULDBLOCK;
790                                 req->rq_phase = RQ_PHASE_INTERPRET;
791                                 continue;
792                         }
793
794                         spin_lock (&req->rq_lock);
795                         req->rq_waiting = 1;
796                         spin_unlock (&req->rq_lock);
797                         LASSERT(list_empty (&req->rq_list));
798                         // list_del(&req->rq_list);
799                         list_add_tail(&req->rq_list, &imp->imp_delayed_list);
800                         spin_unlock_irqrestore(&imp->imp_lock, flags);
801                         continue;
802                 }
803
804                 /* XXX this is the same as ptlrpc_queue_wait */
805                 LASSERT(list_empty(&req->rq_list));
806                 list_add_tail(&req->rq_list, &imp->imp_sending_list);
807                 spin_unlock_irqrestore(&imp->imp_lock, flags);
808
809                 req->rq_reqmsg->status = current->pid;
810                 CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
811                        " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
812                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
813                        req->rq_xid,
814                        imp->imp_connection->c_peer.peer_ni->pni_name,
815                        imp->imp_connection->c_peer.peer_nid,
816                        req->rq_reqmsg->opc);
817
818                 rc = ptl_send_rpc(req);
819                 if (rc) {
820                         req->rq_status = rc;
821                         req->rq_phase = RQ_PHASE_INTERPRET;
822                 }
823         }
824
825         do {
826                 now = LTIME_S (CURRENT_TIME);
827                 timeout = 0;
828                 list_for_each (tmp, &set->set_requests) {
829                         req = list_entry(tmp, struct ptlrpc_request,
830                                          rq_set_chain);
831
832                         /* request in-flight? */
833                         if (!((req->rq_phase == RQ_PHASE_RPC &&
834                                !req->rq_waiting) ||
835                               (req->rq_phase == RQ_PHASE_BULK)))
836                                 continue;
837
838                         if (req->rq_timedout)   /* already timed out */
839                                 continue;
840
841                         deadline = req->rq_sent + req->rq_timeout;
842                         if (deadline <= now)    /* actually expired already */
843                                 timeout = 1;    /* ASAP */
844                         else if (timeout == 0 || timeout > deadline - now)
845                                 timeout = deadline - now;
846                 }
847
848                 /* wait until all complete, interrupted, or an in-flight
849                  * req times out */
850                 CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
851                        set, timeout);
852                 lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
853                                        expired_set, interrupted_set, set);
854                 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
855
856                 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
857
858                 /* -EINTR => all requests have been flagged rq_intr so next
859                  * check completes.
860          * -ETIMEDOUT => someone timed out.  When all reqs have
861                  * timed out, signals are enabled allowing completion with
862                  * EINTR.
863                  * I don't really care if we go once more round the loop in
864                  * the error cases -eeb. */
865         } while (rc != 0);
866
867         LASSERT(set->set_remaining == 0);
868
869         rc = 0;
870         list_for_each(tmp, &set->set_requests) {
871                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
872
873                 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
874                 if (req->rq_status != 0)
875                         rc = req->rq_status;
876         }
877
878         if (set->set_interpret != NULL) {
879                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
880                         set->set_interpret;
881                 rc = interpreter (set, &set->set_args, rc);
882         }
883
884         RETURN(rc);
885 }
886
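/* Really free a request once its refcount has reached zero: unlink it from
 * its import's lists, free the request and reply buffers, drop the
 * export/import and connection references, and free any bulk descriptor. */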
887 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
888 {
889         ENTRY;
890         if (request == NULL) {
891                 EXIT;
892                 return;
893         }
894
895         LASSERT(!request->rq_receiving_reply);
896
897         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
898          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
899         if (request->rq_import != NULL) {
900                 unsigned long flags = 0;
901                 if (!locked)
902                         spin_lock_irqsave(&request->rq_import->imp_lock, flags);
903                 list_del_init(&request->rq_list);
904                 if (!locked)
905                         spin_unlock_irqrestore(&request->rq_import->imp_lock,
906                                                flags);
907         }
908
909         if (atomic_read(&request->rq_refcount) != 0) {
910                 DEBUG_REQ(D_ERROR, request,
911                           "freeing request with nonzero refcount");
912                 LBUG();
913         }
914
915         if (request->rq_repmsg != NULL) {
916                 OBD_FREE(request->rq_repmsg, request->rq_replen);
917                 request->rq_repmsg = NULL;
918         }
919         if (request->rq_reqmsg != NULL) {
920                 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
921                 request->rq_reqmsg = NULL;
922         }
923         if (request->rq_export != NULL) {
924                 class_export_put(request->rq_export);
925                 request->rq_export = NULL;
926         }
927         if (request->rq_import != NULL) {
928                 class_import_put(request->rq_import);
929                 request->rq_import = NULL;
930         }
931         if (request->rq_bulk != NULL)
932                 ptlrpc_free_bulk(request->rq_bulk);
933
934         ptlrpc_put_connection(request->rq_connection);
935         OBD_FREE(request, sizeof(*request));
936         EXIT;
937 }
938
939 void ptlrpc_free_req(struct ptlrpc_request *request)
940 {
941         __ptlrpc_free_req(request, 0);
942 }
943
944 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
945 {
946         ENTRY;
947         if (request == NULL)
948                 RETURN(1);
949
950         if (request == (void *)(long)(0x5a5a5a5a5a5a5a5a) ||
951             request->rq_obd == (void *)(long)(0x5a5a5a5a5a5a5a5a)) {
952                 CERROR("dereferencing freed request (bug 575)\n");
953                 LBUG();
954                 RETURN(1);
955         }
956
957         DEBUG_REQ(D_INFO, request, "refcount now %u",
958                   atomic_read(&request->rq_refcount) - 1);
959
960         if (atomic_dec_and_test(&request->rq_refcount)) {
961                 __ptlrpc_free_req(request, locked);
962                 RETURN(1);
963         }
964
965         RETURN(0);
966 }
967
968 void ptlrpc_req_finished(struct ptlrpc_request *request)
969 {
970         __ptlrpc_req_finished(request, 0);
971 }
972
973 static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
974 {
975         OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
976         request->rq_reqmsg = NULL;
977         request->rq_reqlen = 0;
978 }
979
980 /* Disengage the client's reply buffer from the network
981  * NB does _NOT_ unregister any client-side bulk.
982  * IDEMPOTENT, but _not_ safe against concurrent callers.
983  * The request owner (i.e. the thread doing the I/O) must call...
984  */
985 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
986 {
987         unsigned long flags;
988         int           rc;
989         ENTRY;
990
991         LASSERT(!in_interrupt ());             /* might sleep */
992
993         spin_lock_irqsave (&request->rq_lock, flags);
994         if (!request->rq_receiving_reply) {     /* not waiting for a reply */
995                 spin_unlock_irqrestore (&request->rq_lock, flags);
996                 EXIT;
997                 /* NB reply buffer not freed here */
998                 return;
999         }
1000
1001         LASSERT(!request->rq_replied);         /* callback hasn't completed */
1002         spin_unlock_irqrestore (&request->rq_lock, flags);
1003
1004         rc = PtlMDUnlink (request->rq_reply_md_h);
1005         switch (rc) {
1006         default:
1007                 LBUG ();
1008
1009         case PTL_OK:                            /* unlinked before completion */
1010                 LASSERT(request->rq_receiving_reply);
1011                 LASSERT(!request->rq_replied);
1012                 spin_lock_irqsave (&request->rq_lock, flags);
1013                 request->rq_receiving_reply = 0;
1014                 spin_unlock_irqrestore (&request->rq_lock, flags);
1015                 OBD_FREE(request->rq_repmsg, request->rq_replen);
1016                 request->rq_repmsg = NULL;
1017                 EXIT;
1018                 return;
1019
1020         case PTL_MD_INUSE:                      /* callback in progress */
1021                 for (;;) {
1022                         /* Network access will complete in finite time but
1023                          * the timeout lets us CERROR for visibility */
1024                         struct l_wait_info lwi = LWI_TIMEOUT(10*HZ, NULL, NULL);
1025
1026                         rc = l_wait_event (request->rq_wait_for_rep,
1027                                            request->rq_replied, &lwi);
1028                         LASSERT(rc == 0 || rc == -ETIMEDOUT);
1029                         if (rc == 0) {
1030                                 spin_lock_irqsave (&request->rq_lock, flags);
1031                                 /* Ensure the callback has completed scheduling
1032                                  * me and taken its hands off the request */
1033                                 spin_unlock_irqrestore(&request->rq_lock,flags);
1034                                 break;
1035                         }
1036
1037                         CERROR ("Unexpectedly long timeout: req %p\n", request);
1038                 }
1039                 /* fall through */
1040
1041         case PTL_INV_MD:                        /* callback completed */
1042                 LASSERT(!request->rq_receiving_reply);
1043                 LASSERT(request->rq_replied);
1044                 EXIT;
1045                 return;
1046         }
1047         /* Not Reached */
1048 }
1049
1050 /* caller must hold imp->imp_lock */
1051 void ptlrpc_free_committed(struct obd_import *imp)
1052 {
1053         struct list_head *tmp, *saved;
1054         struct ptlrpc_request *req;
1055         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1056         ENTRY;
1057
1058         LASSERT(imp != NULL);
1059
1060 #ifdef CONFIG_SMP
1061         LASSERT(spin_is_locked(&imp->imp_lock));
1062 #endif
1063
1064         CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
1065                imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1066
1067         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1068                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1069
1070                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1071                 LASSERT(req != last_req);
1072                 last_req = req;
1073
1074                 if (req->rq_import_generation < imp->imp_generation) {
1075                         DEBUG_REQ(D_HA, req, "freeing request with old gen");
1076                         GOTO(free_req, 0);
1077                 }
1078
1079                 if (req->rq_replay) {
1080                         DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
1081                         continue;
1082                 }
1083
1084                 /* not yet committed */
1085                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1086                         DEBUG_REQ(D_HA, req, "stopping search");
1087                         break;
1088                 }
1089
1090                 DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
1091                           imp->imp_peer_committed_transno);
1092 free_req:
1093                 list_del_init(&req->rq_list);
1094                 __ptlrpc_req_finished(req, 1);
1095         }
1096
1097         EXIT;
1098         return;
1099 }
1100
1101 void ptlrpc_cleanup_client(struct obd_import *imp)
1102 {
1103         ENTRY;
1104         EXIT;
1105         return;
1106 }
1107
1108 void ptlrpc_resend_req(struct ptlrpc_request *req)
1109 {
1110         unsigned long flags;
1111
1112         DEBUG_REQ(D_HA, req, "resending");
1113         req->rq_reqmsg->handle.cookie = 0;
1114         ptlrpc_put_connection(req->rq_connection);
1115         req->rq_connection =
1116                 ptlrpc_connection_addref(req->rq_import->imp_connection);
1117         req->rq_status = -EAGAIN;
1118
1119         spin_lock_irqsave (&req->rq_lock, flags);
1120         req->rq_resend = 1;
1121         req->rq_timedout = 0;
1122         if (req->rq_set != NULL)
1123                 wake_up (&req->rq_set->set_waitq);
1124         else
1125                 wake_up(&req->rq_wait_for_rep);
1126         spin_unlock_irqrestore (&req->rq_lock, flags);
1127 }
1128
1129 /* XXX: this function and rq_status are currently unused */
1130 void ptlrpc_restart_req(struct ptlrpc_request *req)
1131 {
1132         unsigned long flags;
1133
1134         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1135         req->rq_status = -ERESTARTSYS;
1136
1137         spin_lock_irqsave (&req->rq_lock, flags);
1138         req->rq_restart = 1;
1139         req->rq_timedout = 0;
1140         if (req->rq_set != NULL)
1141                 wake_up (&req->rq_set->set_waitq);
1142         else
1143                 wake_up(&req->rq_wait_for_rep);
1144         spin_unlock_irqrestore (&req->rq_lock, flags);
1145 }
1146
1147 static int expired_request(void *data)
1148 {
1149         struct ptlrpc_request *req = data;
1150         ENTRY;
1151
1152         RETURN(ptlrpc_expire_one_request(req));
1153 }
1154
1155 static void interrupted_request(void *data)
1156 {
1157         unsigned long flags;
1158
1159         struct ptlrpc_request *req = data;
1160         DEBUG_REQ(D_HA, req, "request interrupted");
1161         spin_lock_irqsave (&req->rq_lock, flags);
1162         req->rq_intr = 1;
1163         spin_unlock_irqrestore (&req->rq_lock, flags);
1164 }
1165
1166 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1167 {
1168         ENTRY;
1169         atomic_inc(&req->rq_refcount);
1170         RETURN(req);
1171 }
1172
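/* Insert 'req' into the import's replay list, kept sorted by transno with
 * rq_xid as a tie-breaker.  Takes an extra reference that is normally
 * dropped again in ptlrpc_free_committed(). */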
1173 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1174                                       struct obd_import *imp)
1175 {
1176         struct list_head *tmp;
1177
1178 #ifdef CONFIG_SMP
1179         LASSERT(spin_is_locked(&imp->imp_lock));
1180 #endif
1181
1182         LASSERT(imp->imp_replayable);
1183         /* Balanced in ptlrpc_free_committed, usually. */
1184         ptlrpc_request_addref(req);
1185         list_for_each_prev(tmp, &imp->imp_replay_list) {
1186                 struct ptlrpc_request *iter =
1187                         list_entry(tmp, struct ptlrpc_request, rq_list);
1188
1189                 /* We may have duplicate transnos if we create and then
1190          * open a file, or for closes retained to match creating
1191                  * opens, so use req->rq_xid as a secondary key.
1192                  * (See bugs 684, 685, and 428.)
1193                  * XXX no longer needed, but all opens need transnos!
1194                  */
1195                 if (iter->rq_transno > req->rq_transno)
1196                         continue;
1197
1198                 if (iter->rq_transno == req->rq_transno) {
1199                         LASSERT(iter->rq_xid != req->rq_xid);
1200                         if (iter->rq_xid > req->rq_xid)
1201                                 continue;
1202                 }
1203
1204                 list_add(&req->rq_list, &iter->rq_list);
1205                 return;
1206         }
1207
1208         list_add_tail(&req->rq_list, &imp->imp_replay_list);
1209 }
1210
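/* Synchronous counterpart of the request-set machinery: send one request,
 * wait (interruptibly, with timeout) for its reply, handle recovery waits
 * and resends, and wait for any bulk transfer before returning. */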
1211 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1212 {
1213         int rc = 0;
1214         int brc;
1215         struct l_wait_info lwi;
1216         struct obd_import *imp = req->rq_import;
1217         struct obd_device *obd = imp->imp_obd;
1218         unsigned long flags;
1219         int do_restart = 0;
1220         int timeout = 0;
1221         ENTRY;
1222
1223         LASSERT(req->rq_set == NULL);
1224         LASSERT(!req->rq_receiving_reply);
1225
1226         /* for distributed debugging */
1227         req->rq_reqmsg->status = current->pid;
1228         LASSERT(imp->imp_obd != NULL);
1229         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
1230                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1231                imp->imp_obd->obd_uuid.uuid,
1232                req->rq_reqmsg->status, req->rq_xid,
1233                imp->imp_connection->c_peer.peer_ni->pni_name,
1234                imp->imp_connection->c_peer.peer_nid,
1235                req->rq_reqmsg->opc);
1236
1237         /* Mark phase here for a little debug help */
1238         req->rq_phase = RQ_PHASE_RPC;
1239
1240         spin_lock_irqsave(&imp->imp_lock, flags);
1241         req->rq_import_generation = imp->imp_generation;
1242 restart:
1243         /*
1244          * If the import has been invalidated (such as by an OST failure), the
1245          * request must fail with -EIO.  Recovery requests are allowed to go
1246          * through, though, so that they have a chance to revalidate the
1247          * import.
1248          */
1249         if (req->rq_import->imp_invalid && req->rq_level == LUSTRE_CONN_FULL) {
1250                 DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");
1251                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1252                 GOTO(out, rc = -EIO);
1253         }
1254
1255         if (req->rq_import_generation < imp->imp_generation) {
1256                 DEBUG_REQ(D_ERROR, req, "req old gen:");
1257                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1258                 GOTO(out, rc = -EIO);
1259         }
1260
1261         if (req->rq_level > imp->imp_level) {
1262                 list_del(&req->rq_list);
1263                 if (req->rq_no_recov || obd->obd_no_recov ||
1264                     imp->imp_dlm_fake) {
1265                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1266                         GOTO(out, rc = -EWOULDBLOCK);
1267                 }
1268
1269                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1270                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1271
1272                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d > %d)",
1273                           current->comm, req->rq_level, imp->imp_level);
1274                 lwi = LWI_INTR(NULL, NULL);
1275                 rc = l_wait_event(req->rq_wait_for_rep,
1276                                   (req->rq_level <= imp->imp_level ||
1277                                    req->rq_err),
1278                                   &lwi);
1279                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%d > %d or %d == 1)",
1280                           current->comm, imp->imp_level, req->rq_level,
1281                           req->rq_err);
1282
1283                 spin_lock_irqsave(&imp->imp_lock, flags);
1284                 list_del_init(&req->rq_list);
1285
1286                 if (req->rq_err ||
1287                     req->rq_import_generation < imp->imp_generation)
1288                         rc = -EIO;
1289
1290
1291                 if (rc) {
1292                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1293                         GOTO(out, rc);
1294                 }
1295
1296                 DEBUG_REQ(D_HA, req, "resumed");
1297         }
1298
1299         /* XXX this is the same as ptlrpc_set_wait */
1300         LASSERT(list_empty(&req->rq_list));
1301         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1302         spin_unlock_irqrestore(&imp->imp_lock, flags);
1303
1304         rc = ptl_send_rpc(req);
1305         if (rc) {
1306                 /* The DLM's fake imports want to avoid all forms of
1307                  * recovery. */
1308                 if (imp->imp_dlm_fake) {
1309                         spin_lock_irqsave(&imp->imp_lock, flags);
1310                         list_del_init(&req->rq_list);
1311                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1312                         GOTO(out, rc);
1313                 }
1314
1315                 DEBUG_REQ(D_ERROR, req, "send failed (%d); recovering", rc);
1316
1317                 ptlrpc_fail_import(imp, req->rq_import_generation);
1318
1319                 /* If we've been told to not wait, we're done. */
1320                 if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
1321                     obd->obd_no_recov) {
1322                         spin_lock_irqsave(&imp->imp_lock, flags);
1323                         list_del_init(&req->rq_list);
1324                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1325                         GOTO(out, rc);
1326                 }
1327
1328                 /* If we errored, allow the user to interrupt immediately */
1329                 timeout = 1;
1330         } else {
1331                 timeout = req->rq_timeout * HZ;
1332                 DEBUG_REQ(D_NET, req, "-- sleeping");
1333         }
1334 #ifdef __KERNEL__
1335         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
1336                                req);
1337         l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
1338 #else
1339         {
1340                 extern int reply_in_callback(ptl_event_t *ev);
1341                 ptl_event_t reply_ev;
1342                 PtlEQWait(req->rq_connection->c_peer.peer_ni->pni_reply_in_eq_h,
1343                           &reply_ev);
1344                 reply_in_callback(&reply_ev);
1345
1346                 LASSERT(reply_ev.mem_desc.user_ptr == (void *)req);
1347                 // ptlrpc_check_reply(req);
1348                 // not required now it only tests
1349         }
1350 #endif
1351
1352         DEBUG_REQ(D_NET, req, "-- done sleeping");
1353
1354         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
1355                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1356                imp->imp_obd->obd_uuid.uuid,
1357                req->rq_reqmsg->status, req->rq_xid,
1358                imp->imp_connection->c_peer.peer_ni->pni_name,
1359                imp->imp_connection->c_peer.peer_nid,
1360                req->rq_reqmsg->opc);
1361
1362         spin_lock_irqsave(&imp->imp_lock, flags);
1363         list_del_init(&req->rq_list);
1364         spin_unlock_irqrestore(&imp->imp_lock, flags);
1365
1366         /* If the reply was received normally, this just grabs the spinlock
1367          * (ensuring the reply callback has returned), sees that
1368          * req->rq_receiving_reply is clear and returns. */
1369         ptlrpc_unregister_reply (req);
1370
1371         if (req->rq_err)
1372                 GOTO(out, rc = -EIO);
1373
1374         /* Resend if we need to, unless we were interrupted. */
1375         if (req->rq_resend && !req->rq_intr) {
1376                 /* ...unless we were specifically told otherwise. */
1377                 if (req->rq_no_resend) {
1378                         spin_lock_irqsave (&req->rq_lock, flags);
1379                         req->rq_no_resend = 0;
1380                         spin_unlock_irqrestore (&req->rq_lock, flags);
1381                         GOTO(out, rc = -ETIMEDOUT);
1382                 }
1383                 spin_lock_irqsave (&req->rq_lock, flags);
1384                 req->rq_resend = 0;
1385                 spin_unlock_irqrestore (&req->rq_lock, flags);
1386                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
1387
1388                 if (req->rq_bulk != NULL)
1389                         ptlrpc_unregister_bulk (req);
1390
1391                 DEBUG_REQ(D_HA, req, "resending: ");
1392                 spin_lock_irqsave(&imp->imp_lock, flags);
1393                 goto restart;
1394         }
1395
1396         if (req->rq_intr) {
1397                 /* Should only be interrupted if we timed out. */
1398                 if (!req->rq_timedout)
1399                         DEBUG_REQ(D_ERROR, req,
1400                                   "rq_intr set but rq_timedout not");
1401                 GOTO(out, rc = -EINTR);
1402         }
1403
1404         if (req->rq_timedout) {                 /* non-recoverable timeout */
1405                 GOTO(out, rc = -ETIMEDOUT);
1406         }
1407
1408         if (!req->rq_replied) {
1409                 /* How can this be? -eeb */
1410                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
1411                 LBUG();
1412                 GOTO(out, rc = req->rq_status);
1413         }
1414
1415         rc = after_reply (req, &do_restart);
1416         /* NB may return +ve success rc */
1417         if (do_restart) {
1418                 if (req->rq_bulk != NULL)
1419                         ptlrpc_unregister_bulk (req);
1420                 DEBUG_REQ(D_HA, req, "resending: ");
1421                 spin_lock_irqsave(&imp->imp_lock, flags);
1422                 goto restart;
1423         }
1424
1425  out:
1426         if (req->rq_bulk != NULL) {
1427                 if (rc >= 0) {                  /* success so far */
1428                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
1429                         brc = l_wait_event(req->rq_wait_for_rep,
1430                                            ptlrpc_bulk_complete(req->rq_bulk),
1431                                            &lwi);
1432                         if (brc != 0) {
1433                                 LASSERT(brc == -ETIMEDOUT);
1434                                 CERROR ("Timed out waiting for bulk\n");
1435                                 rc = brc;
1436                         }
1437                 }
1438                 if (rc < 0) {
1439                         /* MDS blocks for put ACKs before replying */
1440                         /* OSC sets rq_no_resend for the time being */
1441                         LASSERT(req->rq_no_resend);
1442                         ptlrpc_unregister_bulk (req);
1443                 }
1444         }
1445
1446         LASSERT(!req->rq_receiving_reply);
1447         req->rq_phase = RQ_PHASE_INTERPRET;
1448         RETURN(rc);
1449 }
1450
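#if 0
/* Illustrative sketch, not part of the original file: how a caller would
 * typically drive the synchronous path above.  ptlrpc_prep_req() and
 * ptlrpc_req_finished() are the usual prep/cleanup helpers; the opcode and
 * the zero-buffer layout are placeholders for whatever the caller sends. */
static int example_sync_rpc(struct obd_import *imp, int opcode)
{
        struct ptlrpc_request *req;
        int rc;

        /* build a request with only the header buffer */
        req = ptlrpc_prep_req(imp, opcode, 0, NULL, NULL);
        if (req == NULL)
                return -ENOMEM;

        /* expect a header-only reply */
        req->rq_replen = lustre_msg_size(0, NULL);

        /* send and block; -EIO, -EINTR and -ETIMEDOUT are produced above */
        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        return rc;
}
#endif
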
1451 int ptlrpc_replay_req(struct ptlrpc_request *req)
1452 {
1453         int rc = 0, old_level, old_status = 0;
1454         // struct ptlrpc_client *cli = req->rq_import->imp_client;
1455         struct l_wait_info lwi;
1456         ENTRY;
1457
1458         /* I don't touch rq_phase here, so the debug log can show what
1459          * state it was left in */
1460
1461         /* Not handling automatic bulk replay yet (or ever?) */
1462         LASSERT(req->rq_bulk == NULL);
1463
1464         DEBUG_REQ(D_NET, req, "about to replay");
1465
1466         /* Update request's state, since we might have a new connection. */
1467         ptlrpc_put_connection(req->rq_connection);
1468         req->rq_connection =
1469                 ptlrpc_connection_addref(req->rq_import->imp_connection);
1470
1471         /* temporarily set request to RECOVD level (reset at out:) */
1472         old_level = req->rq_level;
1473         if (req->rq_replied)
1474                 old_status = req->rq_repmsg->status;
1475         req->rq_level = LUSTRE_CONN_RECOVER;
1476         rc = ptl_send_rpc(req);
1477         if (rc) {
1478                 CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
1479                 ptlrpc_cleanup_request_buf(req);
1480                 // up(&cli->cli_rpc_sem);
1481                 GOTO(out, rc = -rc);
1482         }
1483
1484         CDEBUG(D_OTHER, "-- sleeping\n");
1485         lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
1486         l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
1487         CDEBUG(D_OTHER, "-- done\n");
1488
1489         // up(&cli->cli_rpc_sem);
1490
1491         /* If the reply was received normally, this just grabs the spinlock
1492          * (ensuring the reply callback has returned), sees that
1493          * req->rq_receiving_reply is clear and returns. */
1494         ptlrpc_unregister_reply (req);
1495
1496         if (!req->rq_replied) {
1497                 CERROR("Unknown reason for wakeup\n");
1498                 /* XXX Phil - I end up here when I kill obdctl */
1499                 /* ...that's because signals aren't all masked in
1500                  * l_wait_event() -eeb */
1501                 GOTO(out, rc = -EINTR);
1502         }
1503
1504 #if SWAB_PARANOIA
1505         /* Clear reply swab mask; this is a new reply in sender's byte order */
1506         req->rq_rep_swab_mask = 0;
1507 #endif
1508         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
1509         if (rc) {
1510                 CERROR("unpack_rep failed: %d\n", rc);
1511                 GOTO(out, rc = -EPROTO);
1512         }
1513 #if 0
1514         /* FIXME: Enable when BlueArc makes new release */
1515         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
1516             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
1517                 CERROR("invalid packet type received (type=%u)\n",
1518                        req->rq_repmsg->type);
1519                 GOTO(out, rc = -EPROTO);
1520         }
1521 #endif
1522
1523         /* The transno had better not change over replay. */
1524         LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);
1525
1526         CDEBUG(D_NET, "got rep "LPU64"\n", req->rq_xid);
1527
1528         /* let the callback do fixups, possibly to the request itself */
1529         if (req->rq_replay_cb)
1530                 req->rq_replay_cb(req);
1531
1532         if (req->rq_replied && req->rq_repmsg->status != old_status) {
1533                 DEBUG_REQ(D_HA, req, "status %d, old was %d",
1534                           req->rq_repmsg->status, old_status);
1535         }
1536
1537  out:
1538         req->rq_level = old_level;
1539         RETURN(rc);
1540 }
1541
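#if 0
/* Illustrative sketch, not part of the original file: recovery code would
 * typically install a fixup callback (see rq_replay_cb above) on a saved
 * request and then resend it with ptlrpc_replay_req().  The callback body
 * here is only a placeholder. */
static void example_replay_cb(struct ptlrpc_request *req)
{
        /* e.g. copy handles or attributes out of the new reply so that
         * later requests which depend on this one stay consistent */
}

static int example_replay_one(struct ptlrpc_request *req)
{
        req->rq_replay_cb = example_replay_cb;
        return ptlrpc_replay_req(req);
}
#endif
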
1542 void ptlrpc_abort_inflight(struct obd_import *imp)
1543 {
1544         unsigned long flags;
1545         struct list_head *tmp, *n;
1546         ENTRY;
1547
1548         /* Make sure that no new requests get processed for this import.
1549          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
1550          * this flag and then putting requests on sending_list or delayed_list.
1551          */
1552         spin_lock_irqsave(&imp->imp_lock, flags);
1553
1554         /* XXX locking?  Maybe we should remove each request with the list
1555          * locked?  Also, how do we know if the requests on the list are
1556          * being freed at this time?
1557          */
1558         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
1559                 struct ptlrpc_request *req =
1560                         list_entry(tmp, struct ptlrpc_request, rq_list);
1561
1562                 DEBUG_REQ(D_HA, req, "inflight");
1563
1564                 spin_lock (&req->rq_lock);
1565                 if (req->rq_import_generation < imp->imp_generation) {
1566                         req->rq_err = 1;
1567                         if (req->rq_set != NULL)
1568                                 wake_up(&req->rq_set->set_waitq);
1569                         else
1570                                 wake_up(&req->rq_wait_for_rep);
1571                 }
1572                 spin_unlock (&req->rq_lock);
1573         }
1574
1575         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
1576                 struct ptlrpc_request *req =
1577                         list_entry(tmp, struct ptlrpc_request, rq_list);
1578
1579                 DEBUG_REQ(D_HA, req, "aborting waiting req");
1580
1581                 spin_lock (&req->rq_lock);
1582                 if (req->rq_import_generation < imp->imp_generation) {
1583                         req->rq_err = 1;
1584                         if (req->rq_set != NULL)
1585                                 wake_up(&req->rq_set->set_waitq);
1586                         else
1587                                 wake_up(&req->rq_wait_for_rep);
1588                 }
1589                 spin_unlock (&req->rq_lock);
1590         }
1591
1592         /* Last chance to free reqs left on the replay list, but we
1593          * will still leak reqs that haven't committed.  */
1594         if (imp->imp_replayable)
1595                 ptlrpc_free_committed(imp);
1596
1597         spin_unlock_irqrestore(&imp->imp_lock, flags);
1598
1599         EXIT;
1600 }
1601
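#if 0
/* Illustrative sketch, not part of the original file: an import
 * invalidation path would bump imp_generation under imp_lock and then call
 * ptlrpc_abort_inflight(), so that waiters see rq_err set and return -EIO
 * from ptlrpc_queue_wait().  The function name is a placeholder. */
static void example_invalidate_import(struct obd_import *imp)
{
        unsigned long flags;

        spin_lock_irqsave(&imp->imp_lock, flags);
        imp->imp_generation++;
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        ptlrpc_abort_inflight(imp);
}
#endif
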
1602 static __u64 ptlrpc_last_xid = 0;
1603 static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;
1604
1605 __u64 ptlrpc_next_xid(void)
1606 {
1607         __u64 tmp;
1608         spin_lock(&ptlrpc_last_xid_lock);
1609         tmp = ++ptlrpc_last_xid;
1610         spin_unlock(&ptlrpc_last_xid_lock);
1611         return tmp;
1612 }
1613
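#if 0
/* Illustrative sketch, not part of the original file: request setup code
 * stamps each new request with a transfer id from the counter above so
 * replies (and bulk) can be matched back to the right request.  The
 * function name is a placeholder. */
static void example_assign_xid(struct ptlrpc_request *req)
{
        req->rq_xid = ptlrpc_next_xid();
}
#endif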
1614