/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

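/* Initialise a client handle: record the request and reply portals this
 * client will use, and give it a human-readable name for debug messages. */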
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}

__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}

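/* Resolve a UUID to a portals peer and look up a connection for it via
 * ptlrpc_get_connection().  On success the connection's remote UUID is
 * recorded and its epoch is bumped; returns NULL if the peer cannot be
 * found. */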
struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c) {
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));
                c->c_epoch++;
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid, c);

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn, obd_uuid_t uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
        return;
}

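/* Allocate and initialise a bulk descriptor bound to the given connection.
 * The page list starts empty and the MD/ME handles are set to the invalid
 * handle until the bulk transfer is actually posted. */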
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc != NULL) {
                desc->bd_connection = ptlrpc_connection_addref(conn);
                atomic_set(&desc->bd_refcount, 1);
                init_waitqueue_head(&desc->bd_waitq);
                INIT_LIST_HEAD(&desc->bd_page_list);
                ptl_set_inv_handle(&desc->bd_md_h);
                ptl_set_inv_handle(&desc->bd_me_h);
        }

        return desc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->bp_desc = desc;
                list_add_tail(&bulk->bp_link, &desc->bd_page_list);
                desc->bd_page_count++;
        }
        return bulk;
}

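/* Free a bulk descriptor and every page still attached to it, then drop
 * the connection reference taken in ptlrpc_prep_bulk().  A NULL descriptor
 * is tolerated. */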
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;
        if (desc == NULL) {
                EXIT;
                return;
        }

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        ENTRY;
        if (bulk == NULL) {
                EXIT;
                return;
        }

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
        EXIT;
}

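/* Allocate a request on the given import, pack a request message with
 * 'count' buffers of the given lengths (contents from 'bufs', if provided),
 * stamp it with a fresh XID and the import handle, and return it at
 * refcount 2 (see the refcount comment in the body).
 *
 * A typical caller sequence looks roughly like the sketch below; the reply
 * buffer sizing is handled by the callers and by ptl_send_rpc(), outside
 * this file, so the rq_replen assignment here is only illustrative:
 *
 *      req = ptlrpc_prep_req(imp, opcode, 1, &size, &buf);
 *      if (!req)
 *              return -ENOMEM;
 *      req->rq_replen = ...;   (expected reply buffer size)
 *      rc = ptlrpc_queue_wait(req);
 *      rc = ptlrpc_check_status(req, rc);
 *      ptlrpc_req_finished(req);
 */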
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn = imp->imp_connection;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;
        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        /*
         * This will be reduced once when the sender is finished (waiting for
         * reply, f.e.), and once when the request has been committed and is
         * removed from the to-be-committed list.
         *
         * Also, the refcount will be increased in ptl_send_rpc immediately
         * before we hand it off to portals, and there will be a corresponding
         * decrease in request_out_cb (which is called to indicate that portals
         * is finished with the request, and it can be safely freed).
         *
         * (Except in the DLM server case, where it will be dropped twice
         * by the sender, and then the last time by request_out_callback.)
         */
        atomic_set(&request->rq_refcount, 2);

        spin_lock(&conn->c_lock);
        request->rq_xid = HTON__u32(++conn->c_xid_out);
        spin_unlock(&conn->c_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}

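/* Drop one reference on the request and free it when the count reaches
 * zero.  NULL is tolerated. */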
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (atomic_dec_and_test(&request->rq_refcount))
                ptlrpc_free_req(request);
}

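/* Unconditionally free a request: release its request and reply buffers,
 * unlink it from its connection's list and drop the connection reference.
 * Complains (but carries on) if the refcount is not already zero. */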
void ptlrpc_free_req(struct ptlrpc_request *request)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                CERROR("freeing request %p (%d->%s:%d) with refcount %d\n",
                       request, request->rq_reqmsg->opc,
                       request->rq_connection->c_remote_uuid,
                       request->rq_import->imp_client->cli_request_portal,
                       atomic_read(&request->rq_refcount));
                /* LBUG(); */
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }

        if (request->rq_connection) {
                spin_lock(&request->rq_connection->c_lock);
                list_del_init(&request->rq_list);
                spin_unlock(&request->rq_connection->c_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

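/* Wakeup condition for ptlrpc_queue_wait()/ptlrpc_replay_req(): returns
 * nonzero once a reply has arrived (unless the connection has dropped back
 * into recovery, in which case the reply is ignored), or when the request
 * has been flagged for resend or error. */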
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repmsg != NULL) {
                struct ptlrpc_connection *conn = req->rq_import->imp_connection;
                spin_lock(&conn->c_lock);
                if (req->rq_level > conn->c_level) {
                        CDEBUG(D_HA,
                               "rep to xid "LPD64" op %d to %s:%d: "
                               "recovery started, ignoring (%d > %d)\n",
                               (unsigned long long)req->rq_xid,
                               req->rq_reqmsg->opc, conn->c_remote_uuid,
                               req->rq_import->imp_client->cli_request_portal,
                               req->rq_level, conn->c_level);
                        req->rq_repmsg = NULL;
                        spin_unlock(&conn->c_lock);
                        GOTO(out, rc = 0);
                }
                spin_unlock(&conn->c_lock);
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                CERROR("-- RESTART --\n");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                CERROR("-- ABORTED --\n");
                GOTO(out, rc = 1);
        }

 out:
        CDEBUG(D_NET, "req = %p, rc = %d\n", req, rc);
        return rc;
}

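/* Fold the transport-level result 'err' together with the status carried
 * in the reply: returns 'err' if it was nonzero, -ENOMEM if the request or
 * reply message is missing, and otherwise the reply's status field (0 on
 * success). */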
int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-ENOMEM);
        }

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
                RETURN(err ? err : -EINVAL);
        }

        if (err != 0) {
                if (err < 0)
                        CERROR("req->rq_repmsg->status is %d\n", err);
                else
                        CDEBUG(D_INFO, "req->rq_repmsg->status is %d\n", err);
                /* XXX: translate this error from net to host */
                RETURN(err);
        }

        RETURN(0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and cleanup any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

/* caller must hold conn->c_lock */
void ptlrpc_free_committed(struct ptlrpc_connection *conn)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;

restart:
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        CDEBUG(D_INFO, "Keeping req %p xid "LPD64" for replay\n",
                               req, req->rq_xid);
                        continue;
                }

                if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                        CDEBUG(D_INFO, "Keeping in-flight req %p xid "LPD64
                               " for replay\n", req, req->rq_xid);
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > conn->c_last_committed)
                        break;

                CDEBUG(D_INFO, "Marking request %p xid %Ld as committed "
                       "transno=%Lu, last_committed=%Lu\n", req,
                       (long long)req->rq_xid, (long long)req->rq_transno,
                       (long long)conn->c_last_committed);
                if (atomic_dec_and_test(&req->rq_refcount)) {
                        /* We do this to prevent free_req deadlock.
                         * Restarting after each removal is not so bad, as we
                         * are almost always deleting the first item in the
                         * list.
                         *
                         * If we use a recursive lock here, we can skip the
                         * unlock/lock/restart sequence.
                         */
                        spin_unlock(&conn->c_lock);
                        ptlrpc_free_req(req);
                        spin_lock(&conn->c_lock);
                        goto restart;
                } else {
                        list_del(&req->rq_list);
                        list_add(&req->rq_list, &conn->c_dying_head);
                }
        }

        EXIT;
        return;
}

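/* Strip every request belonging to this import off the connection's
 * sending and dying lists when the import is being torn down, and drop a
 * reference on each. */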
void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        LASSERT(conn);

restart1:
        spin_lock(&conn->c_lock);
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                /* XXX we should make sure that nobody's sleeping on these! */
                CDEBUG(D_INFO, "Cleaning req %p from sending list.\n", req);
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                goto restart1;
        }
restart2:
        list_for_each_safe(tmp, saved, &conn->c_dying_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                CERROR("Request %p is on the dying list at cleanup!\n", req);
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                spin_lock(&conn->c_lock);
                goto restart2;
        }
        spin_unlock(&conn->c_lock);

        EXIT;
        return;
}

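/* Wake a request that was parked on the delayed list waiting for recovery:
 * refresh the import handle in the request message and wake the sleeper in
 * ptlrpc_queue_wait(). */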
void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "continue delayed request "LPD64" opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

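/* Flag a request for resend after recovery: refresh the import handle,
 * mark it RESEND, clear any timeout flag and wake the waiter so it goes
 * back through the send path. */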
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "resend request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

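/* Restart a request that had already completed when recovery struck: set
 * its status to -ERESTARTSYS, flag it for recovery and wake the waiter. */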
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "restart completed request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RECOVERY;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

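/* Timeout callback for the l_wait_event() in ptlrpc_queue_wait(): mark the
 * request as timed out and, if a recovery daemon is attached, kick the
 * connection into recovery.  Returning 1 stops the sleep; returning 0 lets
 * a LUSTRE_CONN_FULL request keep waiting for recovery to finish. */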
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;

        ENTRY;
        CERROR("req xid "LPD64" op %d: timeout on conn to %s:%d\n",
               (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
               req->rq_connection->c_remote_uuid,
               req->rq_import->imp_client->cli_request_portal);
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;
        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        req->rq_timeout = 0;
        req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep.
         */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);
        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

/* If we're being torn down by umount -f, or the import has been
 * invalidated (such as by an OST failure), the request must fail with
 * -EIO.
 *
 * Must be called with conn->c_lock held; it will be dropped if this
 * returns -EIO.
 *
 * XXX this should just be testing the import, and umount_begin shouldn't
 * XXX touch the connection.
 */
#define EIO_IF_INVALID(conn, req)                                             \
if ((conn->c_flags & CONN_INVALID) ||                                         \
    (req->rq_import->imp_flags & IMP_INVALID)) {                              \
        CERROR("req xid "LPD64" op %d to %s:%d: %s_INVALID\n",                \
               (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,          \
               req->rq_connection->c_remote_uuid,                             \
               req->rq_import->imp_client->cli_request_portal,                \
               (conn->c_flags & CONN_INVALID) ? "CONN_" : "IMP_");            \
        spin_unlock(&conn->c_lock);                                           \
        RETURN(-EIO);                                                         \
}

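/* Send a request and sleep until a reply arrives, the request times out,
 * or it is interrupted.  If the connection is below the level the request
 * needs, the request is parked on the delayed list first and waits for
 * recovery to raise the connection level.  Requests flagged for RESEND
 * (and not interrupted) are sent again; on a successful reply the
 * connection's last_committed is updated and committed requests are
 * retired via ptlrpc_free_committed(). */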
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct ptlrpc_connection *conn = req->rq_import->imp_connection;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "subsys: %s req "LPD64" opc %d level %d, conn level %d\n",
               cli->cli_name, req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        /* XXX probably both an import and connection level are needed */
        if (req->rq_level > conn->c_level) {
                spin_lock(&conn->c_lock);
                EIO_IF_INVALID(conn, req);
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &conn->c_delayed_head);
                spin_unlock(&conn->c_lock);

                CDEBUG(D_HA, "req xid "LPD64" op %d to %s:%d: waiting for "
                       "recovery (%d > %d)\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal,
                       req->rq_level, conn->c_level);

                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= conn->c_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                spin_lock(&conn->c_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        RETURN(-EIO);

                if (rc)
                        RETURN(rc);

                CERROR("process %d resumed\n", current->pid);
        }
 resend:
        req->rq_timeout = obd_timeout;
        spin_lock(&conn->c_lock);
        EIO_IF_INVALID(conn, req);

        list_del(&req->rq_list);
        list_add_tail(&req->rq_list, &conn->c_sending_head);
        spin_unlock(&conn->c_lock);
        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* the sleep below will time out, triggering recovery */
        }

        CDEBUG(D_NET, "-- sleeping on req xid "LPD64" op %d to %s:%d\n",
               (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
               req->rq_connection->c_remote_uuid,
               req->rq_import->imp_client->cli_request_portal);
        lwi = LWI_TIMEOUT_INTR(req->rq_timeout * HZ, expired_request,
                               interrupted_request, req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_NET, "-- done sleeping on req xid "LPD64" op %d to %s:%d\n",
               (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
               req->rq_connection->c_remote_uuid,
               req->rq_import->imp_client->cli_request_portal);

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                ptlrpc_abort(req);
                GOTO(out, rc = -EIO);
        }

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                CDEBUG(D_HA, "resending req xid "LPD64" op %d to %s:%d\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal);
                goto resend;
        }

        // up(&cli->cli_rpc_sem);
        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                LBUG();
                GOTO(out, rc = -EINVAL);
        }
#endif
        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        spin_lock(&conn->c_lock);
        conn->c_last_xid = req->rq_repmsg->last_xid;
        conn->c_last_committed = req->rq_repmsg->last_committed;
        ptlrpc_free_committed(conn);
        spin_unlock(&conn->c_lock);

        EXIT;
 out:
        return rc;
}

#undef EIO_IF_INVALID

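/* Resend an already-packed request during recovery.  The request is
 * temporarily promoted to LUSTRE_CONN_RECOVD so it can travel while the
 * connection is still recovering; the original level is restored before
 * returning.  If a replay callback is registered it gets a chance to fix
 * up the request and reply before the status is checked. */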
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "req "LPD64" opc %d level %d, conn level %d\n",
               req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        req->rq_timeout = obd_timeout;
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc = -rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req, req->rq_replay_cb_data);

        if (req->rq_repmsg->status == 0) {
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);
        } else {
                CERROR("recovery failed: ");
                CERROR("req "LPD64" opc %d level %d, conn level %d\n",
                       req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
                       req->rq_connection->c_level);
                LBUG();
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}