merge b_devel into HEAD (20030626 merge tag) for 0.7.1
fs/lustre-release.git: lustre/ptlrpc/client.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_RPC
24 #ifndef __KERNEL__
25 #include <errno.h>
26 #include <signal.h>
27 #include <liblustre.h>
28 #endif
29
30 #include <linux/obd_support.h>
31 #include <linux/obd_class.h>
32 #include <linux/lustre_lib.h>
33 #include <linux/lustre_ha.h>
34 #include <linux/lustre_import.h>
35
36 #include "ptlrpc_internal.h"
37
38 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
39                         struct ptlrpc_client *cl)
40 {
41         cl->cli_request_portal = req_portal;
42         cl->cli_reply_portal   = rep_portal;
43         cl->cli_name           = name;
44 }
45
46 struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
47 {
48         return &req->rq_connection->c_remote_uuid;
49 }
50
51 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
52 {
53         struct ptlrpc_connection *c;
54         struct ptlrpc_peer peer;
55         int err;
56
57         err = ptlrpc_uuid_to_peer(uuid, &peer);
58         if (err != 0) {
59                 CERROR("cannot find peer %s!\n", uuid->uuid);
60                 return NULL;
61         }
62
63         c = ptlrpc_get_connection(&peer, uuid);
64         if (c) {
65                 memcpy(c->c_remote_uuid.uuid,
66                        uuid->uuid, sizeof(c->c_remote_uuid.uuid));
67                 c->c_epoch++;
68         }
69
70         CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
71
72         return c;
73 }
74
75 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
76                                  struct obd_uuid *uuid)
77 {
78         struct ptlrpc_peer peer;
79         int err;
80
81         err = ptlrpc_uuid_to_peer (uuid, &peer);
82         if (err != 0) {
83                 CERROR("cannot find peer %s!\n", uuid->uuid);
84                 return;
85         }
86
87         memcpy (&conn->c_peer, &peer, sizeof (peer));
88         return;
89 }
90
91 static inline struct ptlrpc_bulk_desc *new_bulk(void)
92 {
93         struct ptlrpc_bulk_desc *desc;
94
95         OBD_ALLOC(desc, sizeof(*desc));
96         if (!desc)
97                 return NULL;
98
99         spin_lock_init (&desc->bd_lock);
100         init_waitqueue_head(&desc->bd_waitq);
101         INIT_LIST_HEAD(&desc->bd_page_list);
102         desc->bd_md_h = PTL_HANDLE_NONE;
103         desc->bd_me_h = PTL_HANDLE_NONE;
104
105         return desc;
106 }
107
108 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
109                                                int type, int portal)
110 {
111         struct obd_import       *imp = req->rq_import;
112         unsigned long            flags;
113         struct ptlrpc_bulk_desc *desc;
114
115         LASSERT (type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
116
117         desc = new_bulk();
118         if (desc == NULL)
119                 RETURN(NULL);
120
121         /* Is this sampled at the right place?  Do we want to get the import
122          * generation just before we send?  Should it match the generation of
123          * the request? */
124         spin_lock_irqsave(&imp->imp_lock, flags);
125         desc->bd_import_generation = imp->imp_generation;
126         spin_unlock_irqrestore(&imp->imp_lock, flags);
127
128         desc->bd_import = class_import_get(imp);
129         desc->bd_req = req;
130         desc->bd_type = type;
131         desc->bd_portal = portal;
132
133         /* This makes req own desc; desc is freed when req itself is freed */
134         req->rq_bulk = desc;
135
136         return desc;
137 }
138
139 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
140                                                int type, int portal)
141 {
142         struct obd_export       *exp = req->rq_export;
143         struct ptlrpc_bulk_desc *desc;
144
145         LASSERT (type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
146
147         desc = new_bulk();
148         if (desc == NULL)
149                 RETURN(NULL);
150
151         desc->bd_export = class_export_get(exp);
152         desc->bd_req = req;
153         desc->bd_type = type;
154         desc->bd_portal = portal;
155
156         /* NB we don't assign rq_bulk here; server-side requests are
157          * re-used, and the handler frees the bulk desc explicitly. */
158
159         return desc;
160 }
161
162 int ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
163                           struct page *page, int pageoffset, int len)
164 {
165         struct ptlrpc_bulk_page *bulk;
166
167         OBD_ALLOC(bulk, sizeof(*bulk));
168         if (bulk == NULL)
169                 return (-ENOMEM);
170
171         LASSERT (page != NULL);
172         LASSERT (pageoffset >= 0);
173         LASSERT (len > 0);
174         LASSERT (pageoffset + len <= PAGE_SIZE);
175
176         bulk->bp_page = page;
177         bulk->bp_pageoffset = pageoffset;
178         bulk->bp_buflen = len;
179
180         bulk->bp_desc = desc;
181         list_add_tail(&bulk->bp_link, &desc->bd_page_list);
182         desc->bd_page_count++;
183         return 0;
184 }
185
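/* Illustrative usage sketch (not part of the original file): how a
 * client-side caller might attach whole pages to a request's bulk
 * descriptor using the two helpers above.  The portal name
 * (OST_BULK_PORTAL) and the page array are assumptions for the example;
 * BULK_PUT_SINK means the client is the sink of the server's bulk PUT. */
#if 0
static int example_attach_bulk(struct ptlrpc_request *req,
                               struct page **pages, int npages)
{
        struct ptlrpc_bulk_desc *desc;
        int i, rc;

        desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK, OST_BULK_PORTAL);
        if (desc == NULL)
                return -ENOMEM;

        for (i = 0; i < npages; i++) {
                /* whole pages, starting at offset 0 */
                rc = ptlrpc_prep_bulk_page(desc, pages[i], 0, PAGE_SIZE);
                if (rc != 0)
                        return rc;
        }

        /* desc is now owned by req (rq_bulk) and is freed along with it */
        return 0;
}
#endif
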
186 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
187 {
188         struct list_head *tmp, *next;
189         ENTRY;
190
191         LASSERT (desc != NULL);
192         LASSERT (desc->bd_page_count != 0x5a5a5a5a); /* not freed already */
193         LASSERT (!desc->bd_network_rw);         /* network hands off */
194
195         list_for_each_safe(tmp, next, &desc->bd_page_list) {
196                 struct ptlrpc_bulk_page *bulk;
197                 bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
198                 ptlrpc_free_bulk_page(bulk);
199         }
200
201         LASSERT (desc->bd_page_count == 0);
202         LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
203
204         if (desc->bd_export)
205                 class_export_put(desc->bd_export);
206         else
207                 class_import_put(desc->bd_import);
208
209         OBD_FREE(desc, sizeof(*desc));
210         EXIT;
211 }
212
213 void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
214 {
215         LASSERT (bulk != NULL);
216
217         list_del(&bulk->bp_link);
218         bulk->bp_desc->bd_page_count--;
219         OBD_FREE(bulk, sizeof(*bulk));
220 }
221
222 struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
223                                        int count, int *lengths, char **bufs)
224 {
225         struct ptlrpc_request *request;
226         int rc;
227         ENTRY;
228
229         LASSERT((unsigned long)imp > 0x1000);
230
231         OBD_ALLOC(request, sizeof(*request));
232         if (!request) {
233                 CERROR("request allocation out of memory\n");
234                 RETURN(NULL);
235         }
236
237         rc = lustre_pack_msg(count, lengths, bufs,
238                              &request->rq_reqlen, &request->rq_reqmsg);
239         if (rc) {
240                 CERROR("cannot pack request %d\n", rc);
241                 OBD_FREE(request, sizeof(*request));
242                 RETURN(NULL);
243         }
244
245         request->rq_timeout = obd_timeout;
246         request->rq_level = LUSTRE_CONN_FULL;
247         request->rq_type = PTL_RPC_MSG_REQUEST;
248         request->rq_import = class_import_get(imp);
249         request->rq_phase = RQ_PHASE_NEW;
250
251         /* XXX FIXME bug 249 */
252         request->rq_request_portal = imp->imp_client->cli_request_portal;
253         request->rq_reply_portal = imp->imp_client->cli_reply_portal;
254
255         request->rq_connection = ptlrpc_connection_addref(imp->imp_connection);
256
257         spin_lock_init (&request->rq_lock);
258         INIT_LIST_HEAD(&request->rq_list);
259         init_waitqueue_head(&request->rq_wait_for_rep);
260         request->rq_xid = ptlrpc_next_xid();
261         atomic_set(&request->rq_refcount, 1);
262
263         request->rq_reqmsg->opc = opcode;
264         request->rq_reqmsg->flags = 0;
265
266         RETURN(request);
267 }
268
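/* Illustrative usage sketch (not part of the original file): preparing a
 * request with a single message buffer.  The opcode (OST_GETATTR), body
 * type (struct ost_body) and lustre_msg_size() are assumptions for the
 * example; a real caller fills in the body that lustre_pack_msg reserved
 * in rq_reqmsg buffer 0 and sets rq_replen to size the reply buffer. */
#if 0
static struct ptlrpc_request *example_prep(struct obd_import *imp)
{
        int size = sizeof(struct ost_body);
        struct ptlrpc_request *req;

        req = ptlrpc_prep_req(imp, OST_GETATTR, 1, &size, NULL);
        if (req == NULL)
                return NULL;

        /* expected reply: one buffer of the same size */
        req->rq_replen = lustre_msg_size(1, &size);
        return req;
}
#endif
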
269 struct ptlrpc_request_set *ptlrpc_prep_set(void)
270 {
271         struct ptlrpc_request_set *set;
272
273         OBD_ALLOC(set, sizeof *set);
274         if (!set)
275                 RETURN(NULL);
276         INIT_LIST_HEAD(&set->set_requests);
277         init_waitqueue_head(&set->set_waitq);
278         set->set_remaining = 0;
279
280         RETURN(set);
281 }
282
283 /* Finish with this set; opposite of prep_set. */
284 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
285 {
286         struct list_head *tmp;
287         struct list_head *next;
288         int               expected_phase;
289         int               n = 0;
290         ENTRY;
291
292         /* Requests on the set should either all be completed, or all be new */
293         expected_phase = (set->set_remaining == 0) ?
294                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
295         list_for_each (tmp, &set->set_requests) {
296                 struct ptlrpc_request *req =
297                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
298
299                 LASSERT (req->rq_phase == expected_phase);
300                 n++;
301         }
302
303         LASSERT (set->set_remaining == 0 || set->set_remaining == n);
304
305         list_for_each_safe(tmp, next, &set->set_requests) {
306                 struct ptlrpc_request *req =
307                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
308                 list_del_init(&req->rq_set_chain);
309
310                 LASSERT (req->rq_phase == expected_phase);
311
312                 if (req->rq_phase == RQ_PHASE_NEW) {
313
314                         if (req->rq_interpret_reply != NULL) {
315                                 int (*interpreter)(struct ptlrpc_request *,
316                                                    void *, int) =
317                                         req->rq_interpret_reply;
318
319                                 /* higher level (i.e. LOV) failed;
320                                  * let the sub reqs clean up */
321                                 req->rq_status = -EBADR;
322                                 interpreter(req, &req->rq_async_args, req->rq_status);
323                         }
324                         set->set_remaining--;
325                 }
326
327                 req->rq_set = NULL;
328                 ptlrpc_req_finished (req);
329         }
330
331         LASSERT(set->set_remaining == 0);
332
333         OBD_FREE(set, sizeof(*set));
334         EXIT;
335 }
336
337 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
338                         struct ptlrpc_request *req)
339 {
340         /* The set takes over the caller's request reference */
341         list_add_tail(&req->rq_set_chain, &set->set_requests);
342         req->rq_set = set;
343         set->set_remaining++;
344 }
345
346 static int ptlrpc_check_reply(struct ptlrpc_request *req)
347 {
348         unsigned long flags;
349         int rc = 0;
350         ENTRY;
351
352         /* serialise with network callback */
353         spin_lock_irqsave (&req->rq_lock, flags);
354
355         if (req->rq_replied) {
356                 DEBUG_REQ(D_NET, req, "REPLIED:");
357                 GOTO(out, rc = 1);
358         }
359
360         if (req->rq_err) {
361                 DEBUG_REQ(D_ERROR, req, "ABORTED:");
362                 GOTO(out, rc = 1);
363         }
364
365         if (req->rq_resend) {
366                 DEBUG_REQ(D_ERROR, req, "RESEND:");
367                 GOTO(out, rc = 1);
368         }
369
370         if (req->rq_restart) {
371                 DEBUG_REQ(D_ERROR, req, "RESTART:");
372                 GOTO(out, rc = 1);
373         }
374         EXIT;
375  out:
376         spin_unlock_irqrestore (&req->rq_lock, flags);
377         DEBUG_REQ(D_NET, req, "rc = %d for", rc);
378         return rc;
379 }
380
381 static int ptlrpc_check_status(struct ptlrpc_request *req)
382 {
383         int err;
384         ENTRY;
385
386         err = req->rq_repmsg->status;
387         if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
388                 DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
389                 if (err >= 0)
390                         CERROR("Error Reply has >= zero status\n");
391                 RETURN(err < 0 ? err : -EINVAL);
392         }
393
394         if (err < 0) {
395                 DEBUG_REQ(D_INFO, req, "status is %d", err);
396         } else if (err > 0) {
397                 /* XXX: translate this error from net to host */
398                 DEBUG_REQ(D_INFO, req, "status is %d", err);
399         }
400
401         RETURN(err);
402 }
403
404 #warning this needs to change after robert fixes eviction handling
405 static int after_reply(struct ptlrpc_request *req, int *restartp)
406 {
407         unsigned long flags;
408         struct obd_import *imp = req->rq_import;
409         int rc;
410         ENTRY;
411
412         LASSERT (!req->rq_receiving_reply);
413         LASSERT (req->rq_replied);
414
415         if (restartp != NULL)
416                 *restartp = 0;
417
418         /* NB Until this point, the whole of the incoming message,
419          * including buflens, status etc is in the sender's byte order. */
420
421 #if SWAB_PARANOIA
422         /* Clear reply swab mask; this is a new reply in sender's byte order */
423         req->rq_rep_swab_mask = 0;
424 #endif
425         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
426         if (rc) {
427                 CERROR("unpack_rep failed: %d\n", rc);
428                 RETURN (-EPROTO);
429         }
430
431         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
432             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
433                 CERROR("invalid packet type received (type=%u)\n",
434                        req->rq_repmsg->type);
435                 RETURN (-EPROTO);
436         }
437
438         /* Store transno in reqmsg for replay. */
439         req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;
440
441         rc = ptlrpc_check_status(req);
442
443         /* Either we've been evicted, or the server has failed for
444          * some reason. Try to reconnect, and if that fails, punt to the
445          * upcall. */
446         if (rc == -ENOTCONN) {
447                 if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
448                     imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
449                         RETURN(-ENOTCONN);
450                 }
451
452                 rc = ptlrpc_request_handle_eviction(req);
453                 if (rc)
454                         CERROR("can't reconnect to %s@%s: %d\n",
455                                imp->imp_target_uuid.uuid,
456                                imp->imp_connection->c_remote_uuid.uuid, rc);
457                 else
458                         ptlrpc_wake_delayed(imp);
459
460                 if (req->rq_err)
461                         RETURN(-EIO);
462
463                 if (req->rq_resend) {
464                         if (restartp == NULL)
465                                 LBUG(); /* async resend not supported yet */
466                         spin_lock_irqsave (&req->rq_lock, flags);
467                         req->rq_resend = 0;
468                         spin_unlock_irqrestore (&req->rq_lock, flags);
469                         *restartp = 1;
470                         lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
471                         DEBUG_REQ(D_HA, req, "resending: ");
472                         RETURN (0);
473                 }
474
475                 CERROR("request should be err or resend: %p\n", req);
476                 LBUG();
477         }
478
479         if (req->rq_import->imp_replayable) {
480                 spin_lock_irqsave(&imp->imp_lock, flags);
481                 if ((req->rq_replay || req->rq_transno != 0) && rc >= 0)
482                         ptlrpc_retain_replayable_request(req, imp);
483
484                 if (req->rq_transno > imp->imp_max_transno)
485                         imp->imp_max_transno = req->rq_transno;
486
487                 /* Replay-enabled imports return commit-status information. */
488                 if (req->rq_repmsg->last_committed) {
489                         if (req->rq_repmsg->last_committed <
490                             imp->imp_peer_committed_transno) {
491                                 CERROR("%s went back in time (transno "LPD64
492                                        " was committed, server claims "LPD64
493                                        ")! is shared storage not coherent?\n",
494                                        imp->imp_target_uuid.uuid,
495                                        imp->imp_peer_committed_transno,
496                                        req->rq_repmsg->last_committed);
497                         }
498                         imp->imp_peer_committed_transno =
499                                 req->rq_repmsg->last_committed;
500                 }
501                 ptlrpc_free_committed(imp);
502                 spin_unlock_irqrestore(&imp->imp_lock, flags);
503         }
504
505         RETURN(rc);
506 }
507
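/* Summary note (added for clarity, not in the original file): requests on
 * a set move through the phases NEW -> RPC -> (BULK ->) INTERPRET ->
 * COMPLETE.  ptlrpc_set_wait() moves requests from NEW to RPC, sends them,
 * and then sleeps on check_set() below, which advances each request as its
 * reply (and any bulk) arrives, runs rq_interpret_reply in the INTERPRET
 * phase, and reports completion once every request has reached COMPLETE. */
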
508 static int check_set(struct ptlrpc_request_set *set)
509 {
510         unsigned long flags;
511         struct list_head *tmp;
512         ENTRY;
513
514         if (set->set_remaining == 0)
515                 RETURN(1);
516
517         list_for_each(tmp, &set->set_requests) {
518                 struct ptlrpc_request *req =
519                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
520                 struct obd_import *imp = req->rq_import;
521                 int rc = 0;
522
523                 if (!(req->rq_phase == RQ_PHASE_RPC ||
524                       req->rq_phase == RQ_PHASE_BULK ||
525                       req->rq_phase == RQ_PHASE_INTERPRET ||
526                       req->rq_phase == RQ_PHASE_COMPLETE)) {
527                         DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
528                         LBUG();
529                 }
530
531                 if (req->rq_phase == RQ_PHASE_COMPLETE)
532                         continue;
533
534                 if (req->rq_phase == RQ_PHASE_INTERPRET)
535                         GOTO (interpret, req->rq_status);
536                 
537                 if (req->rq_err) {
538                         ptlrpc_unregister_reply(req);
539                         if (req->rq_status == 0)
540                                 req->rq_status = -EIO;
541                         req->rq_phase = RQ_PHASE_INTERPRET;
542
543                         spin_lock_irqsave(&imp->imp_lock, flags);
544                         list_del_init(&req->rq_list);
545                         spin_unlock_irqrestore(&imp->imp_lock, flags);
546
547                         GOTO (interpret, req->rq_status);
548                 }
549
550                 if (req->rq_intr) {
551                         /* NB could be on delayed list */
552                         ptlrpc_unregister_reply(req);
553                         req->rq_status = -EINTR;
554                         req->rq_phase = RQ_PHASE_INTERPRET;
555
556                         spin_lock_irqsave(&imp->imp_lock, flags);
557                         list_del_init(&req->rq_list);
558                         spin_unlock_irqrestore(&imp->imp_lock, flags);
559
560                         GOTO (interpret, req->rq_status);
561                 }
562
563                 if (req->rq_phase == RQ_PHASE_RPC) {
564                         int do_restart = 0;
565                         if (req->rq_waiting || req->rq_resend) {
566                                 spin_lock_irqsave(&imp->imp_lock, flags);
567
568                                 if (req->rq_level > imp->imp_level) {
569                                         spin_unlock_irqrestore(&imp->imp_lock,
570                                                                flags);
571                                         continue;
572                                 }
573
574                                 list_del(&req->rq_list);
575                                 list_add_tail(&req->rq_list,
576                                               &imp->imp_sending_list);
577                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
578
579                                 req->rq_waiting = 0;
580                                 if (req->rq_resend) {
581                                         lustre_msg_add_flags(req->rq_reqmsg,
582                                                              MSG_RESENT);
583                                         spin_lock_irqsave(&req->rq_lock, flags);
584                                         req->rq_resend = 0;
585                                         spin_unlock_irqrestore(&req->rq_lock,
586                                                                flags);
587                                         ptlrpc_unregister_reply(req);
588                                         if (req->rq_bulk)
589                                                 ptlrpc_unregister_bulk(req);
590                                }
591
592                                 rc = ptl_send_rpc(req);
593                                 if (rc) {
594                                         req->rq_status = rc;
595                                         req->rq_phase = RQ_PHASE_INTERPRET;
596                                         GOTO (interpret, req->rq_status);
597                                 }
598
599                         }
600
601                         /* Ensure the network callback returned */
602                         spin_lock_irqsave (&req->rq_lock, flags);
603                         if (!req->rq_replied) {
604                                 spin_unlock_irqrestore (&req->rq_lock, flags);
605                                 continue;
606                         }
607                         spin_unlock_irqrestore (&req->rq_lock, flags);
608
609                         spin_lock_irqsave(&imp->imp_lock, flags);
610                         list_del_init(&req->rq_list);
611                         spin_unlock_irqrestore(&imp->imp_lock, flags);
612
613                         req->rq_status = after_reply(req, &do_restart);
614                         if (do_restart) {
615                                 spin_lock_irqsave (&req->rq_lock, flags);
616                                 req->rq_resend = 1; /* ugh */
617                                 spin_unlock_irqrestore (&req->rq_lock, flags);
618                                 continue;
619                         }
620
621                         /* If there is no bulk associated with this request,
622                          * then we're done and should let the interpreter
623                          * process the reply.  Similarly if the RPC returned
624                          * an error, and therefore the bulk will never arrive.
625                          */
626                         if (req->rq_bulk == NULL || req->rq_status != 0) {
627                                 req->rq_phase = RQ_PHASE_INTERPRET;
628                                 GOTO (interpret, req->rq_status);
629                         }
630
631                         req->rq_phase = RQ_PHASE_BULK;
632                 }
633
634                 LASSERT (req->rq_phase == RQ_PHASE_BULK);
635                 if (!ptlrpc_bulk_complete (req->rq_bulk))
636                         continue;
637
638                 req->rq_phase = RQ_PHASE_INTERPRET;
639
640         interpret:
641                 LASSERT (req->rq_phase == RQ_PHASE_INTERPRET);
642                 LASSERT (!req->rq_receiving_reply);
643
644                 if (req->rq_bulk != NULL)
645                         ptlrpc_unregister_bulk (req);
646
647                 if (req->rq_interpret_reply != NULL) {
648                         int (*interpreter)(struct ptlrpc_request *,void *,int) =
649                                 req->rq_interpret_reply;
650                         req->rq_status = interpreter(req, &req->rq_async_args,
651                                                      req->rq_status);
652                 }
653
654                 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
655                        "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
656                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
657                        req->rq_xid,
658                        imp->imp_connection->c_peer.peer_ni->pni_name,
659                        imp->imp_connection->c_peer.peer_nid,
660                        req->rq_reqmsg->opc);
661
662                 req->rq_phase = RQ_PHASE_COMPLETE;
663                 set->set_remaining--;
664         }
665
666         RETURN (set->set_remaining == 0);
667 }
668
669 static int expire_one_request(struct ptlrpc_request *req)
670 {
671         unsigned long      flags;
672         struct obd_import *imp = req->rq_import;
673         ENTRY;
674
675         DEBUG_REQ(D_ERROR, req, "timeout");
676
677         spin_lock_irqsave (&req->rq_lock, flags);
678         req->rq_timedout = 1;
679         spin_unlock_irqrestore (&req->rq_lock, flags);
680
681         ptlrpc_unregister_reply (req);
682
683         if (imp == NULL) {
684                 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
685                 RETURN(1);
686         }
687
688         /* The DLM server doesn't want recovery run on its imports. */
689         if (imp->imp_dlm_fake)
690                 RETURN(1);
691
692         /* If this request is for recovery or other primordial tasks,
693          * don't go back to sleep, and don't start recovery again. */
694         if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
695             imp->imp_obd->obd_no_recov)
696                 RETURN(1);
697
698         ptlrpc_fail_import(imp, req->rq_import_generation);
699
700         RETURN(0);
701 }
702
703 static int expired_set(void *data)
704 {
705         struct ptlrpc_request_set *set = data;
706         struct list_head          *tmp;
707         time_t                     now = LTIME_S (CURRENT_TIME);
708         ENTRY;
709
710         LASSERT (set != NULL);
711
712         /* A timeout expired; see which reqs it applies to... */
713         list_for_each (tmp, &set->set_requests) {
714                 struct ptlrpc_request *req =
715                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
716
717                 /* request in-flight? */
718                 if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
719                       (req->rq_phase == RQ_PHASE_BULK)))
720                         continue;
721
722                 if (req->rq_timedout ||           /* already dealt with */
723                     req->rq_sent + req->rq_timeout > now) /* not expired */
724                         continue;
725
726                 /* deal with this guy */
727                 expire_one_request (req);
728         }
729
730         /* When waiting for a whole set, we always break out of the
731          * sleep so we can recalculate the timeout, or enable interrupts
732          * iff everyone's timed out.
733          */
734         RETURN(1);
735 }
736
737 static void interrupted_set(void *data)
738 {
739         struct ptlrpc_request_set *set = data;
740         struct list_head *tmp;
741         unsigned long flags;
742
743         LASSERT (set != NULL);
744         CERROR("INTERRUPTED SET %p\n", set);
745
746         list_for_each(tmp, &set->set_requests) {
747                 struct ptlrpc_request *req =
748                         list_entry(tmp, struct ptlrpc_request, rq_set_chain);
749
750                 if (req->rq_phase != RQ_PHASE_RPC)
751                         continue;
752
753                 spin_lock_irqsave (&req->rq_lock, flags);
754                 req->rq_intr = 1;
755                 spin_unlock_irqrestore (&req->rq_lock, flags);
756         }
757 }
758
759 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
760 {
761         struct list_head      *tmp;
762         struct obd_import     *imp;
763         struct ptlrpc_request *req;
764         struct l_wait_info     lwi;
765         unsigned long          flags;
766         int                    rc;
767         time_t                 now;
768         time_t                 deadline;
769         int                    timeout;
770         ENTRY;
771
772         LASSERT(!list_empty(&set->set_requests));
773         list_for_each(tmp, &set->set_requests) {
774                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
775
776                 LASSERT (req->rq_level == LUSTRE_CONN_FULL);
777                 LASSERT (req->rq_phase == RQ_PHASE_NEW);
778                 req->rq_phase = RQ_PHASE_RPC;
779
780                 imp = req->rq_import;
781                 spin_lock_irqsave(&imp->imp_lock, flags);
782
783                 if (imp->imp_invalid) {
784                         spin_unlock_irqrestore(&imp->imp_lock, flags);
785                         req->rq_status = -EIO;
786                         req->rq_phase = RQ_PHASE_INTERPRET;
787                         continue;
788                 }
789
790                 if (req->rq_level > imp->imp_level) {
791                         if (req->rq_no_recov || imp->imp_obd->obd_no_recov ||
792                             imp->imp_dlm_fake) {
793                                 spin_unlock_irqrestore(&imp->imp_lock, flags);
794                                 req->rq_status = -EWOULDBLOCK;
795                                 req->rq_phase = RQ_PHASE_INTERPRET;
796                                 continue;
797                         }
798
799                         spin_lock (&req->rq_lock);
800                         req->rq_waiting = 1;
801                         spin_unlock (&req->rq_lock);
802                         LASSERT (list_empty (&req->rq_list));
803                         // list_del(&req->rq_list);
804                         list_add_tail(&req->rq_list, &imp->imp_delayed_list);
805                         spin_unlock_irqrestore(&imp->imp_lock, flags);
806                         continue;
807                 }
808
809                 /* XXX this is the same as ptlrpc_queue_wait */
810                 LASSERT(list_empty(&req->rq_list));
811                 list_add_tail(&req->rq_list, &imp->imp_sending_list);
812                 req->rq_import_generation = imp->imp_generation;
813                 spin_unlock_irqrestore(&imp->imp_lock, flags);
814
815                 CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
816                        " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
817                        imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
818                        req->rq_xid,
819                        imp->imp_connection->c_peer.peer_ni->pni_name,
820                        imp->imp_connection->c_peer.peer_nid,
821                        req->rq_reqmsg->opc);
822
823                 rc = ptl_send_rpc(req);
824                 if (rc) {
825                         req->rq_status = rc;
826                         req->rq_phase = RQ_PHASE_INTERPRET;
827                 }
828         }
829
830         do {
831                 now = LTIME_S (CURRENT_TIME);
832                 timeout = 0;
833                 list_for_each (tmp, &set->set_requests) {
834                         req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
835
836                         /* request in-flight? */
837                         if (!((req->rq_phase == RQ_PHASE_RPC &&
838                                !req->rq_waiting) ||
839                               (req->rq_phase == RQ_PHASE_BULK)))
840                                 continue;
841
842                         if (req->rq_timedout)   /* already timed out */
843                                 continue;
844
845                         deadline = req->rq_sent + req->rq_timeout;
846                         if (deadline <= now)    /* actually expired already */
847                                 timeout = 1;    /* ASAP */
848                         else if (timeout == 0 || timeout > deadline - now)
849                                 timeout = deadline - now;
850                 }
851
852                 /* wait until all complete, interrupted, or an in-flight
853                  * req times out */
854                 CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
855                        set, timeout);
856                 lwi = LWI_TIMEOUT_INTR(timeout ? timeout * HZ : 1,
857                                        expired_set, interrupted_set, set);
858                 rc = l_wait_event(set->set_waitq, check_set(set), &lwi);
859
860                 LASSERT (rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
861
862                 /* -EINTR => all requests have been flagged rq_intr so next
863                  * check completes.
864                  * -ETIMEDOUT => someone timed out.  When all reqs have
865                  * timed out, signals are enabled allowing completion with
866                  * EINTR.
867                  * I don't really care if we go once more round the loop in
868                  * the error cases -eeb. */
869         } while (rc != 0);
870
871         LASSERT (set->set_remaining == 0);
872
873         rc = 0;
874         list_for_each(tmp, &set->set_requests) {
875                 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
876
877                 LASSERT (req->rq_phase == RQ_PHASE_COMPLETE);
878                 if (req->rq_status != 0)
879                         rc = req->rq_status;
880         }
881
882         if (set->set_interpret != NULL) {
883                 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
884                         set->set_interpret;
885                 rc = interpreter (set, &set->set_args, rc);
886         }
887
888         RETURN(rc);
889 }
890
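/* Illustrative usage sketch (not part of the original file): the typical
 * life-cycle of a request set.  The opcode, body size and reply
 * interpreter are assumptions for the example; the important points are
 * that ptlrpc_set_add_req() takes over the caller's reference and that
 * ptlrpc_set_destroy() drops the completed requests with the set. */
#if 0
static int example_interpret(struct ptlrpc_request *req, void *args, int rc)
{
        /* per-request post-processing; rc is req->rq_status */
        return rc;
}

static int example_send_many(struct obd_import *imp, int n)
{
        struct ptlrpc_request_set *set;
        struct ptlrpc_request *req;
        int size = sizeof(struct ost_body);
        int i, rc;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        for (i = 0; i < n; i++) {
                req = ptlrpc_prep_req(imp, OST_GETATTR, 1, &size, NULL);
                if (req == NULL)
                        break;
                req->rq_replen = lustre_msg_size(1, &size);
                req->rq_interpret_reply = example_interpret;
                ptlrpc_set_add_req(set, req);   /* set now owns req */
        }

        rc = ptlrpc_set_wait(set);      /* send all, sleep until all done */
        ptlrpc_set_destroy(set);        /* drops the requests too */
        return rc;
}
#endif
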
891 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
892 {
893         ENTRY;
894         if (request == NULL) {
895                 EXIT;
896                 return;
897         }
898
899         LASSERT (!request->rq_receiving_reply);
900
901         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
902          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
903         if (request->rq_import != NULL) {
904                 unsigned long flags = 0;
905                 if (!locked)
906                         spin_lock_irqsave(&request->rq_import->imp_lock, flags);
907                 list_del_init(&request->rq_list);
908                 if (!locked)
909                         spin_unlock_irqrestore(&request->rq_import->imp_lock,
910                                                flags);
911         }
912
913         if (atomic_read(&request->rq_refcount) != 0) {
914                 DEBUG_REQ(D_ERROR, request,
915                           "freeing request with nonzero refcount");
916                 LBUG();
917         }
918
919         if (request->rq_repmsg != NULL) {
920                 OBD_FREE(request->rq_repmsg, request->rq_replen);
921                 request->rq_repmsg = NULL;
922         }
923         if (request->rq_reqmsg != NULL) {
924                 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
925                 request->rq_reqmsg = NULL;
926         }
927         if (request->rq_export != NULL) {
928                 class_export_put(request->rq_export);
929                 request->rq_export = NULL;
930         }
931         if (request->rq_import != NULL) {
932                 class_import_put(request->rq_import);
933                 request->rq_import = NULL;
934         }
935         if (request->rq_bulk != NULL)
936                 ptlrpc_free_bulk(request->rq_bulk);
937
938         ptlrpc_put_connection(request->rq_connection);
939         OBD_FREE(request, sizeof(*request));
940         EXIT;
941 }
942
943 void ptlrpc_free_req(struct ptlrpc_request *request)
944 {
945         __ptlrpc_free_req(request, 0);
946 }
947
948 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
949 {
950         ENTRY;
951         if (request == NULL)
952                 RETURN(1);
953
954         if (request == (void *)(long)(0x5a5a5a5a5a5a5a5a)) {
955                 CERROR("dereferencing freed request (bug 575)\n");
956                 LBUG();
957                 RETURN(1);
958         }
959
960         DEBUG_REQ(D_INFO, request, "refcount now %u",
961                   atomic_read(&request->rq_refcount) - 1);
962
963         if (atomic_dec_and_test(&request->rq_refcount)) {
964                 __ptlrpc_free_req(request, locked);
965                 RETURN(1);
966         }
967
968         RETURN(0);
969 }
970
971 void ptlrpc_req_finished(struct ptlrpc_request *request)
972 {
973         __ptlrpc_req_finished(request, 0);
974 }
975
976 static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
977 {
978         OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
979         request->rq_reqmsg = NULL;
980         request->rq_reqlen = 0;
981 }
982
983 /* Disengage the client's reply buffer from the network
984  * NB does _NOT_ unregister any client-side bulk.
985  * IDEMPOTENT, but _not_ safe against concurrent callers.
986  * The request owner (i.e. the thread doing the I/O) must call...
987  */
988 void ptlrpc_unregister_reply (struct ptlrpc_request *request)
989 {
990         unsigned long flags;
991         int           rc;
992         ENTRY;
993
994         LASSERT (!in_interrupt ());             /* might sleep */
995
996         spin_lock_irqsave (&request->rq_lock, flags);
997         if (!request->rq_receiving_reply) {     /* not waiting for a reply */
998                 spin_unlock_irqrestore (&request->rq_lock, flags);
999                 EXIT;
1000                 /* NB reply buffer not freed here */
1001                 return;
1002         }
1003
1004         LASSERT (!request->rq_replied);         /* callback hasn't completed */
1005         spin_unlock_irqrestore (&request->rq_lock, flags);
1006
1007         rc = PtlMDUnlink (request->rq_reply_md_h);
1008         switch (rc) {
1009         default:
1010                 LBUG ();
1011
1012         case PTL_OK:                            /* unlinked before completion */
1013                 LASSERT (request->rq_receiving_reply);
1014                 LASSERT (!request->rq_replied);
1015                 spin_lock_irqsave (&request->rq_lock, flags);
1016                 request->rq_receiving_reply = 0;
1017                 spin_unlock_irqrestore (&request->rq_lock, flags);
1018                 OBD_FREE(request->rq_repmsg, request->rq_replen);
1019                 request->rq_repmsg = NULL;
1020                 EXIT;
1021                 return;
1022
1023         case PTL_MD_INUSE:                      /* callback in progress */
1024                 for (;;) {
1025                         /* Network access will complete in finite time but
1026                          * the timeout lets us CERROR for visibility */
1027                         struct l_wait_info lwi = LWI_TIMEOUT(10*HZ, NULL, NULL);
1028
1029                         rc = l_wait_event (request->rq_wait_for_rep,
1030                                            request->rq_replied, &lwi);
1031                         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1032                         if (rc == 0) {
1033                                 spin_lock_irqsave (&request->rq_lock, flags);
1034                                 /* Ensure the callback has completed scheduling
1035                                  * me and taken its hands off the request */
1036                                 spin_unlock_irqrestore(&request->rq_lock,flags);
1037                                 break;
1038                         }
1039
1040                         CERROR ("Unexpectedly long timeout: req %p\n", request);
1041                 }
1042                 /* fall through */
1043
1044         case PTL_INV_MD:                        /* callback completed */
1045                 LASSERT (!request->rq_receiving_reply);
1046                 LASSERT (request->rq_replied);
1047                 EXIT;
1048                 return;
1049         }
1050         /* Not Reached */
1051 }
1052
1053 /* caller must hold imp->imp_lock */
1054 void ptlrpc_free_committed(struct obd_import *imp)
1055 {
1056         struct list_head *tmp, *saved;
1057         struct ptlrpc_request *req;
1058         struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1059         ENTRY;
1060
1061         LASSERT(imp != NULL);
1062
1063 #ifdef CONFIG_SMP
1064         LASSERT(spin_is_locked(&imp->imp_lock));
1065 #endif
1066
1067         CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
1068                imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1069
1070         list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1071                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1072
1073                 /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
1074                 LASSERT (req != last_req);
1075                 last_req = req;
1076
1077                 if (req->rq_replay) {
1078                         DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
1079                         continue;
1080                 }
1081
1082                 /* not yet committed */
1083                 if (req->rq_transno > imp->imp_peer_committed_transno) {
1084                         DEBUG_REQ(D_HA, req, "stopping search");
1085                         break;
1086                 }
1087
1088                 DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
1089                           imp->imp_peer_committed_transno);
1090                 list_del_init(&req->rq_list);
1091                 __ptlrpc_req_finished(req, 1);
1092         }
1093
1094         EXIT;
1095         return;
1096 }
1097
1098 void ptlrpc_cleanup_client(struct obd_import *imp)
1099 {
1100         ENTRY;
1101         EXIT;
1102         return;
1103 }
1104
1105 void ptlrpc_resend_req(struct ptlrpc_request *req)
1106 {
1107         unsigned long flags;
1108
1109         DEBUG_REQ(D_HA, req, "resending");
1110         req->rq_reqmsg->handle.cookie = 0;
1111         ptlrpc_put_connection(req->rq_connection);
1112         req->rq_connection =
1113                 ptlrpc_connection_addref(req->rq_import->imp_connection);
1114         req->rq_status = -EAGAIN;
1115
1116         spin_lock_irqsave (&req->rq_lock, flags);
1117         req->rq_resend = 1;
1118         req->rq_timedout = 0;
1119         if (req->rq_set != NULL)
1120                 wake_up (&req->rq_set->set_waitq);
1121         else
1122                 wake_up(&req->rq_wait_for_rep);
1123         spin_unlock_irqrestore (&req->rq_lock, flags);
1124 }
1125
1126 /* XXX: this function and rq_status are currently unused */
1127 void ptlrpc_restart_req(struct ptlrpc_request *req)
1128 {
1129         unsigned long flags;
1130
1131         DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1132         req->rq_status = -ERESTARTSYS;
1133
1134         spin_lock_irqsave (&req->rq_lock, flags);
1135         req->rq_restart = 1;
1136         req->rq_timedout = 0;
1137         if (req->rq_set != NULL)
1138                 wake_up (&req->rq_set->set_waitq);
1139         else
1140                 wake_up(&req->rq_wait_for_rep);
1141         spin_unlock_irqrestore (&req->rq_lock, flags);
1142 }
1143
1144 static int expired_request(void *data)
1145 {
1146         struct ptlrpc_request *req = data;
1147         ENTRY;
1148
1149         RETURN(expire_one_request(req));
1150 }
1151
1152 static void interrupted_request(void *data)
1153 {
1154         unsigned long flags;
1155
1156         struct ptlrpc_request *req = data;
1157         DEBUG_REQ(D_HA, req, "request interrupted");
1158         spin_lock_irqsave (&req->rq_lock, flags);
1159         req->rq_intr = 1;
1160         spin_unlock_irqrestore (&req->rq_lock, flags);
1161 }
1162
1163 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1164 {
1165         ENTRY;
1166         atomic_inc(&req->rq_refcount);
1167         RETURN(req);
1168 }
1169
1170 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1171                                       struct obd_import *imp)
1172 {
1173         struct list_head *tmp;
1174
1175 #ifdef CONFIG_SMP
1176         LASSERT(spin_is_locked(&imp->imp_lock));
1177 #endif
1178
1179         LASSERT(imp->imp_replayable);
1180         /* Balanced in ptlrpc_free_committed, usually. */
1181         ptlrpc_request_addref(req);
1182         list_for_each_prev(tmp, &imp->imp_replay_list) {
1183                 struct ptlrpc_request *iter =
1184                         list_entry(tmp, struct ptlrpc_request, rq_list);
1185
1186                 /* We may have duplicate transnos if we create and then
1187                  * open a file, or for closes retained to match creating
1188                  * opens, so use req->rq_xid as a secondary key.
1189                  * (See bugs 684, 685, and 428.)
1190                  * XXX no longer needed, but all opens need transnos!
1191                  */
1192                 if (iter->rq_transno > req->rq_transno)
1193                         continue;
1194
1195                 if (iter->rq_transno == req->rq_transno) {
1196                         LASSERT(iter->rq_xid != req->rq_xid);
1197                         if (iter->rq_xid > req->rq_xid)
1198                                 continue;
1199                 }
1200
1201                 list_add(&req->rq_list, &iter->rq_list);
1202                 return;
1203         }
1204
1205         list_add_tail(&req->rq_list, &imp->imp_replay_list);
1206 }
1207
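/* Worked example (added for illustration, not in the original file): with
 * replay entries (transno 4, xid 7) and (transno 4, xid 9) already on the
 * list, inserting a request with transno 4 and xid 8 walks backwards past
 * the xid-9 entry, stops at the xid-7 entry, and is linked in between
 * them, keeping the list sorted by transno first and xid second. */
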
1208 int ptlrpc_queue_wait(struct ptlrpc_request *req)
1209 {
1210         int rc = 0;
1211         int brc;
1212         struct l_wait_info lwi;
1213         struct obd_import *imp = req->rq_import;
1214         struct obd_device *obd = imp->imp_obd;
1215         struct ptlrpc_connection *conn = imp->imp_connection;
1216         unsigned long flags;
1217         int do_restart = 0;
1218         int timeout = 0;
1219         ENTRY;
1220
1221         LASSERT (req->rq_set == NULL);
1222         LASSERT (!req->rq_receiving_reply);
1223
1224         /* for distributed debugging */
1225         req->rq_reqmsg->status = current->pid;
1226         LASSERT(imp->imp_obd != NULL);
1227         CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
1228                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1229                imp->imp_obd->obd_uuid.uuid,
1230                req->rq_reqmsg->status, req->rq_xid,
1231                conn->c_peer.peer_ni->pni_name, conn->c_peer.peer_nid,
1232                req->rq_reqmsg->opc);
1233
1234         /* Mark phase here for a little debug help */
1235         req->rq_phase = RQ_PHASE_RPC;
1236
1237 restart:
1238         /*
1239          * If the import has been invalidated (such as by an OST failure), the
1240          * request must fail with -EIO.  Recovery requests are allowed to go
1241          * through, though, so that they have a chance to revalidate the
1242          * import.
1243          */
1244         spin_lock_irqsave(&imp->imp_lock, flags);
1245         if (req->rq_import->imp_invalid && req->rq_level == LUSTRE_CONN_FULL) {
1246                 DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");
1247                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1248                 GOTO (out, rc = -EIO);
1249         }
1250
1251         if (req->rq_level > imp->imp_level) {
1252                 list_del(&req->rq_list);
1253                 if (req->rq_no_recov || obd->obd_no_recov ||
1254                     imp->imp_dlm_fake) {
1255                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1256                         GOTO (out, rc = -EWOULDBLOCK);
1257                 }
1258
1259                 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1260                 spin_unlock_irqrestore(&imp->imp_lock, flags);
1261
1262                 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d > %d)",
1263                           current->comm, req->rq_level, imp->imp_level);
1264                 lwi = LWI_INTR(NULL, NULL);
1265                 rc = l_wait_event(req->rq_wait_for_rep,
1266                                   (req->rq_level <= imp->imp_level ||
1267                                    req->rq_err),
1268                                   &lwi);
1269                 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%d > %d)",
1270                           current->comm, req->rq_level, imp->imp_level);
1271
1272                 spin_lock_irqsave(&imp->imp_lock, flags);
1273                 list_del_init(&req->rq_list);
1274
1275                 if (req->rq_err)
1276                         rc = -EIO;
1277
1278                 if (rc) {
1279                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1280                         GOTO (out, rc);
1281                 }
1282
1283                 CERROR("process %d resumed\n", current->pid);
1284         }
1285
1286         /* XXX this is the same as ptlrpc_set_wait */
1287         LASSERT(list_empty(&req->rq_list));
1288         list_add_tail(&req->rq_list, &imp->imp_sending_list);
1289         req->rq_import_generation = imp->imp_generation;
1290         spin_unlock_irqrestore(&imp->imp_lock, flags);
1291
1292         rc = ptl_send_rpc(req);
1293         if (rc) {
1294                 /* The DLM's fake imports want to avoid all forms of
1295                  * recovery. */
1296                 if (imp->imp_dlm_fake) {
1297                         spin_lock_irqsave(&imp->imp_lock, flags);
1298                         list_del_init(&req->rq_list);
1299                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1300                         GOTO(out, rc);
1301                 }
1302
1303                 DEBUG_REQ(D_ERROR, req, "send failed (%d); recovering", rc);
1304
1305                 ptlrpc_fail_import(imp, req->rq_import_generation);
1306
1307                 /* If we've been told to not wait, we're done. */
1308                 if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
1309                     obd->obd_no_recov) {
1310                         spin_lock_irqsave(&imp->imp_lock, flags);
1311                         list_del_init(&req->rq_list);
1312                         spin_unlock_irqrestore(&imp->imp_lock, flags);
1313                         GOTO(out, rc);
1314                 }
1315
1316                 /* If we errored, allow the user to interrupt immediately */
1317                 timeout = 1;
1318         } else {
1319                 timeout = req->rq_timeout * HZ;
1320                 DEBUG_REQ(D_NET, req, "-- sleeping");
1321         }
1322 #ifdef __KERNEL__
1323         lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
1324                                req);
1325         l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
1326 #else
1327         {
1328                 extern int reply_in_callback(ptl_event_t *ev);
1329                 ptl_event_t reply_ev;
1330                 PtlEQWait(req->rq_connection->c_peer.peer_ni->pni_reply_in_eq_h,
1331                           &reply_ev);
1332                 reply_in_callback(&reply_ev);
1333
1334                 LASSERT (reply_ev.mem_desc.user_ptr == (void *)req);
1335                 // ptlrpc_check_reply(req);
1336                 // not required now it only tests
1337         }
1338 #endif
1339
1340         DEBUG_REQ(D_NET, req, "-- done sleeping");
1341
1342         CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
1343                "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
1344                imp->imp_obd->obd_uuid.uuid,
1345                req->rq_reqmsg->status, req->rq_xid,
1346                conn->c_peer.peer_ni->pni_name, conn->c_peer.peer_nid,
1347                req->rq_reqmsg->opc);
1348
1349         spin_lock_irqsave(&imp->imp_lock, flags);
1350         list_del_init(&req->rq_list);
1351         spin_unlock_irqrestore(&imp->imp_lock, flags);
1352
1353         /* If the reply was received normally, this just grabs the spinlock
1354          * (ensuring the reply callback has returned), sees that
1355          * req->rq_receiving_reply is clear and returns. */
1356         ptlrpc_unregister_reply (req);
1357
1358         if (req->rq_err)
1359                 GOTO(out, rc = -EIO);
1360
1361         /* Resend if we need to, unless we were interrupted. */
1362         if (req->rq_resend && !req->rq_intr) {
1363                 /* ...unless we were specifically told otherwise. */
1364                 if (req->rq_no_resend) {
1365                         spin_lock_irqsave (&req->rq_lock, flags);
1366                         req->rq_no_resend = 0;
1367                         spin_unlock_irqrestore (&req->rq_lock, flags);
1368                         GOTO(out, rc = -ETIMEDOUT);
1369                 }
1370                 spin_lock_irqsave (&req->rq_lock, flags);
1371                 req->rq_resend = 0;
1372                 spin_unlock_irqrestore (&req->rq_lock, flags);
1373                 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
1374
1375                 if (req->rq_bulk != NULL)
1376                         ptlrpc_unregister_bulk (req);
1377
1378                 DEBUG_REQ(D_HA, req, "resending: ");
1379                 goto restart;
1380         }
1381
1382         if (req->rq_intr) {
1383                 /* Should only be interrupted if we timed out. */
1384                 if (!req->rq_timedout)
1385                         DEBUG_REQ(D_ERROR, req,
1386                                   "rq_intr set but rq_timedout not");
1387                 GOTO(out, rc = -EINTR);
1388         }
1389
1390         if (req->rq_timedout) {                 /* non-recoverable timeout */
1391                 GOTO(out, rc = -ETIMEDOUT);
1392         }
1393
1394         if (!req->rq_replied) {
1395                 /* How can this be? -eeb */
1396                 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
1397                 LBUG();
1398                 GOTO(out, rc = req->rq_status);
1399         }
1400
1401         rc = after_reply (req, &do_restart);
1402         /* NB may return +ve success rc */
1403         if (do_restart) {
1404                 if (req->rq_bulk != NULL)
1405                         ptlrpc_unregister_bulk (req);
1406                 DEBUG_REQ(D_HA, req, "resending: ");
1407                 goto restart;
1408         }
1409
1410  out:
1411         if (req->rq_bulk != NULL) {
1412                 if (rc >= 0) {                  /* success so far */
1413                         lwi = LWI_TIMEOUT(timeout, NULL, NULL);
1414                         brc = l_wait_event(req->rq_wait_for_rep,
1415                                            ptlrpc_bulk_complete(req->rq_bulk),
1416                                            &lwi);
1417                         if (brc != 0) {
1418                                 LASSERT (brc == -ETIMEDOUT);
1419                                 CERROR ("Timed out waiting for bulk\n");
1420                                 rc = brc;
1421                         }
1422                 }
1423                 if (rc < 0) {
1424                         /* MDS blocks for put ACKs before replying */
1425                         /* OSC sets rq_no_resend for the time being */
1426                         LASSERT (req->rq_no_resend);
1427                         ptlrpc_unregister_bulk (req);
1428                 }
1429         }
1430
1431         LASSERT (!req->rq_receiving_reply);
1432         req->rq_phase = RQ_PHASE_INTERPRET;
1433         RETURN (rc);
1434 }
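
/*
 * Editor's note: a minimal sketch of the wait-with-timeout pattern used just
 * above to wait for bulk completion.  The helper name (example_wait_for_bulk)
 * is an illustrative assumption and not part of this file; only
 * l_wait_event(), LWI_TIMEOUT(), ptlrpc_bulk_complete() and
 * ptlrpc_unregister_bulk() are taken from the surrounding code.
 */
#if 0
static int example_wait_for_bulk(struct ptlrpc_request *req, int timeout)
{
        struct l_wait_info lwi;
        int rc;

        /* Block until the bulk descriptor reports completion or the timeout
         * expires; LWI_TIMEOUT() with NULL handlers is a plain timed wait
         * with no interrupt callback. */
        lwi = LWI_TIMEOUT(timeout, NULL, NULL);
        rc = l_wait_event(req->rq_wait_for_rep,
                          ptlrpc_bulk_complete(req->rq_bulk), &lwi);
        if (rc == -ETIMEDOUT) {
                CERROR("Timed out waiting for bulk\n");
                /* Tear down the bulk MD/ME so a late completion cannot
                 * touch pages we are about to release. */
                ptlrpc_unregister_bulk(req);
        }
        return rc;
}
#endif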
1435
1436 int ptlrpc_replay_req(struct ptlrpc_request *req)
1437 {
1438         int rc = 0, old_level, old_status = 0;
1439         // struct ptlrpc_client *cli = req->rq_import->imp_client;
1440         struct l_wait_info lwi;
1441         ENTRY;
1442
1443         /* I don't touch rq_phase here, so the debug log can show what
1444          * state it was left in */
1445
1446         /* Not handling automatic bulk replay yet (or ever?) */
1447         LASSERT (req->rq_bulk == NULL);
1448
1449         DEBUG_REQ(D_NET, req, "about to replay");
1450
1451         /* Update request's state, since we might have a new connection. */
1452         ptlrpc_put_connection(req->rq_connection);
1453         req->rq_connection =
1454                 ptlrpc_connection_addref(req->rq_import->imp_connection);
1455
1456         /* temporarily set request to RECOVD level (reset at out:) */
1457         old_level = req->rq_level;
1458         if (req->rq_replied)
1459                 old_status = req->rq_repmsg->status;
1460         req->rq_level = LUSTRE_CONN_RECOVD;
1461         rc = ptl_send_rpc(req);
1462         if (rc) {
1463                 CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
1464                 ptlrpc_cleanup_request_buf(req);
1465                 // up(&cli->cli_rpc_sem);
1466                 GOTO(out, rc = -rc);
1467         }
1468
1469         CDEBUG(D_OTHER, "-- sleeping\n");
1470         lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
1471         l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
1472         CDEBUG(D_OTHER, "-- done\n");
1473
1474         // up(&cli->cli_rpc_sem);
1475
1476         /* If the reply was received normally, this just grabs the spinlock
1477          * (ensuring the reply callback has returned), sees that
1478          * req->rq_receiving_reply is clear and returns. */
1479         ptlrpc_unregister_reply (req);
1480
1481         if (!req->rq_replied) {
1482                 CERROR("Unknown reason for wakeup\n");
1483                 /* XXX Phil - I end up here when I kill obdctl */
1484                 /* ...that's because signals aren't all masked in
1485                  * l_wait_event() -eeb */
1486                 GOTO(out, rc = -EINTR);
1487         }
1488
1489 #if SWAB_PARANOIA
1490         /* Clear reply swab mask; this is a new reply in sender's byte order */
1491         req->rq_rep_swab_mask = 0;
1492 #endif
1493         rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
1494         if (rc) {
1495                 CERROR("unpack_rep failed: %d\n", rc);
1496                 GOTO(out, rc = -EPROTO);
1497         }
1498 #if 0
1499         /* FIXME: Enable when BlueArc makes new release */
1500         if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
1501             req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
1502                 CERROR("invalid packet type received (type=%u)\n",
1503                        req->rq_repmsg->type);
1504                 GOTO(out, rc = -EPROTO);
1505         }
1506 #endif
1507
1508         /* The transno had better not change over replay. */
1509         LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);
1510
1511         CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
1512
1513         /* Let the callback do fixups, possibly including changes to the request itself */
1514         if (req->rq_replay_cb)
1515                 req->rq_replay_cb(req);
1516
1517         if (req->rq_replied && req->rq_repmsg->status != old_status) {
1518                 DEBUG_REQ(D_HA, req, "status %d, old was %d",
1519                           req->rq_repmsg->status, old_status);
1520         }
1521
1522  out:
1523         req->rq_level = old_level;
1524         RETURN(rc);
1525 }
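
/*
 * Editor's note: a hedged sketch of how a caller might hook the replay fixup
 * path used above.  The function names (example_replay_fixup,
 * example_setup_replay) are illustrative assumptions; the only facts taken
 * from this file are that rq_replay_cb, when set, is invoked with the request
 * after the replayed reply has been unpacked and its transno verified.
 */
#if 0
static void example_replay_fixup(struct ptlrpc_request *req)
{
        /* A replay callback typically reconciles client state with the
         * replayed reply, e.g. re-checking the reply status or copying
         * fields out of rq_repmsg.  Here we only log what happened. */
        DEBUG_REQ(D_HA, req, "replay fixup, status %d",
                  req->rq_repmsg->status);
}

static void example_setup_replay(struct ptlrpc_request *req)
{
        /* Arrange for ptlrpc_replay_req() to call us once the reply
         * arrives. */
        req->rq_replay_cb = example_replay_fixup;
}
#endif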
1526
1527 void ptlrpc_abort_inflight(struct obd_import *imp)
1528 {
1529         unsigned long flags;
1530         struct list_head *tmp, *n;
1531         ENTRY;
1532
1533         /* Make sure that no new requests get processed for this import.
1534          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
1535          * this flag and then putting requests on sending_list or delayed_list.
1536          */
1537         spin_lock_irqsave(&imp->imp_lock, flags);
1538         if (!imp->imp_replayable)
1539                 /* on b_devel, I moved this line to
1540                    ptlrpc_set_import_active because I thought it made
1541                    more sense there and possibly not all callers of
1542                    this function expect this. I'll leave it here until
1543                    I can figure out if it's correct or not. - rread 5/12/03  */
1544                 imp->imp_invalid = 1;
1545
1546         /* XXX locking?  Maybe we should remove each request with the list
1547          * locked?  Also, how do we know if the requests on the list are
1548          * being freed at this time?
1549          */
1550         list_for_each_safe(tmp, n, &imp->imp_sending_list) {
1551                 struct ptlrpc_request *req =
1552                         list_entry(tmp, struct ptlrpc_request, rq_list);
1553
1554                 DEBUG_REQ(D_HA, req, "inflight");
1555
1556                 spin_lock (&req->rq_lock);
1557                 req->rq_err = 1;
1558                 if (req->rq_set != NULL)
1559                         wake_up(&req->rq_set->set_waitq);
1560                 else
1561                         wake_up(&req->rq_wait_for_rep);
1562                 spin_unlock (&req->rq_lock);
1563         }
1564
1565         list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
1566                 struct ptlrpc_request *req =
1567                         list_entry(tmp, struct ptlrpc_request, rq_list);
1568
1569                 DEBUG_REQ(D_HA, req, "aborting waiting req");
1570
1571                 spin_lock (&req->rq_lock);
1572                 req->rq_err = 1;
1573                 if (req->rq_set != NULL)
1574                         wake_up(&req->rq_set->set_waitq);
1575                 else
1576                         wake_up(&req->rq_wait_for_rep);
1577                 spin_unlock (&req->rq_lock);
1578         }
1579
1580         /* Last chance to free reqs left on the replay list, but we
1581          * will still leak reqs that haven't committed.  */
1582         if (imp->imp_replayable)
1583                 ptlrpc_free_committed(imp);
1584
1585         spin_unlock_irqrestore(&imp->imp_lock, flags);
1586
1587         EXIT;
1588 }
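
/*
 * Editor's note: the two loops above repeat the same "flag the request in
 * error and wake whoever is waiting on it" step.  Below is a minimal sketch
 * of that step as a standalone helper; the helper name
 * (example_abort_one_req) is an assumption, while everything it touches
 * (rq_lock, rq_err, rq_set, set_waitq, rq_wait_for_rep) appears in the loops
 * above.
 */
#if 0
static void example_abort_one_req(struct ptlrpc_request *req)
{
        spin_lock(&req->rq_lock);
        req->rq_err = 1;
        /* Requests driven by a set are waited on via the set's waitqueue;
         * plain synchronous requests sleep on their own rq_wait_for_rep. */
        if (req->rq_set != NULL)
                wake_up(&req->rq_set->set_waitq);
        else
                wake_up(&req->rq_wait_for_rep);
        spin_unlock(&req->rq_lock);
}
#endif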
1589
1590 static __u64 ptlrpc_last_xid = 0;
1591 static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;
1592
1593 __u64 ptlrpc_next_xid(void)
1594 {
1595         __u64 tmp;
1596         spin_lock(&ptlrpc_last_xid_lock);
1597         tmp = ++ptlrpc_last_xid;
1598         spin_unlock(&ptlrpc_last_xid_lock);
1599         return tmp;
1600 }
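
/*
 * Editor's note: a small usage sketch for ptlrpc_next_xid().  The helper name
 * (example_assign_xid) is illustrative; the assumptions are that rq_xid is
 * the per-request transfer id (it is printed as such in ptlrpc_replay_req()
 * above) and that each new request should receive a unique, monotonically
 * increasing value.
 */
#if 0
static void example_assign_xid(struct ptlrpc_request *req)
{
        /* The spinlock inside ptlrpc_next_xid() makes the increment safe
         * against concurrent callers, so no extra locking is needed here. */
        req->rq_xid = ptlrpc_next_xid();
}
#endif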
1601
1602