/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}

__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}

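/* Resolve a peer UUID to a ptlrpc_connection.  On success the connection's
 * remote UUID is refreshed and its epoch is bumped, so callers can tell
 * that the peer has been (re)addressed. */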
struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c) {
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));
                c->c_epoch++;
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid, c);

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 obd_uuid_t uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
        return;
}

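/* A bulk descriptor is created with a single reference and takes its own
 * reference on the connection; pages are chained on bd_page_list by
 * ptlrpc_prep_bulk_page() and released again in ptlrpc_free_bulk(). */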
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc != NULL) {
                desc->bd_connection = ptlrpc_connection_addref(conn);
                atomic_set(&desc->bd_refcount, 1);
                init_waitqueue_head(&desc->bd_waitq);
                INIT_LIST_HEAD(&desc->bd_page_list);
                ptl_set_inv_handle(&desc->bd_md_h);
                ptl_set_inv_handle(&desc->bd_me_h);
        }

        return desc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->bp_desc = desc;
                list_add_tail(&bulk->bp_link, &desc->bd_page_list);
                desc->bd_page_count++;
        }
        return bulk;
}

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;
        if (desc == NULL) {
                EXIT;
                return;
        }

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        ENTRY;
        if (bulk == NULL) {
                EXIT;
                return;
        }

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
        EXIT;
}

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn = imp->imp_connection;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;

        /* XXX FIXME bug 625069 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        /*
         * The refcount will be dropped once when the sender is finished
         * (e.g. after waiting for the reply), and once when the request
         * has been committed and is removed from the to-be-committed list.
         *
         * Also, the refcount will be increased in ptl_send_rpc immediately
         * before we hand it off to portals, and there will be a corresponding
         * decrease in request_out_cb (which is called to indicate that portals
         * is finished with the request, and it can be safely freed).
         *
         * (Except in the DLM server case, where it will be dropped twice
         * by the sender, and then the last time by request_out_callback.)
         */
        atomic_set(&request->rq_refcount, 2);

        spin_lock(&conn->c_lock);
        request->rq_xid = HTON__u32(++conn->c_xid_out);
        spin_unlock(&conn->c_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->flags = 0;

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}

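/* Typical synchronous caller pattern (illustrative sketch only; the opcode
 * and body type below are hypothetical).  A request built by
 * ptlrpc_prep_req() is sent with ptlrpc_queue_wait() and released with
 * ptlrpc_req_finished():
 *
 *      int size = sizeof(struct some_body);    (hypothetical body type)
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, SOME_OPCODE, 1, &size, NULL);
 *      if (!req)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req);
 *      rc = ptlrpc_check_status(req, rc);
 *      ptlrpc_req_finished(req);
 *      return rc;
 */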
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (atomic_dec_and_test(&request->rq_refcount))
                ptlrpc_free_req(request);
        else
                DEBUG_REQ(D_INFO, request, "refcount now %u",
                          atomic_read(&request->rq_refcount));
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                CERROR("freeing request %p (%d->%s:%d) with refcount %d\n",
                       request, request->rq_reqmsg->opc,
                       request->rq_connection->c_remote_uuid,
                       request->rq_import->imp_client->cli_request_portal,
                       atomic_read(&request->rq_refcount));
                /* LBUG(); */
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }

        if (request->rq_connection) {
                spin_lock(&request->rq_connection->c_lock);
                list_del_init(&request->rq_list);
                spin_unlock(&request->rq_connection->c_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

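/* Wake-up condition for the l_wait_event() in ptlrpc_queue_wait(): returns
 * 1 when a reply has arrived (or a resend/abort forces a wakeup), 0 to keep
 * sleeping.  Replies that race with the start of recovery are ignored. */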
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repmsg != NULL) {
                struct ptlrpc_connection *conn = req->rq_import->imp_connection;
                if (req->rq_level > conn->c_level) {
                        CDEBUG(D_HA,
                               "rep to xid "LPD64" op %d to %s:%d: "
                               "recovery started, ignoring (%d > %d)\n",
                               (unsigned long long)req->rq_xid,
                               req->rq_reqmsg->opc, conn->c_remote_uuid,
                               req->rq_import->imp_client->cli_request_portal,
                               req->rq_level, conn->c_level);
                        req->rq_repmsg = NULL;
                        GOTO(out, rc = 0);
                }
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                CERROR("-- RESTART --\n");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                CERROR("-- ABORTED --\n");
                GOTO(out, rc = 1);
        }

 out:
        CDEBUG(D_NET, "req = %p, rc = %d\n", req, rc);
        return rc;
}

int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-ENOMEM);
        }

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
                RETURN(err ? err : -EINVAL);
        }

        if (err != 0) {
                if (err < 0)
                        CERROR("req->rq_repmsg->status is %d\n", err);
                else
                        CDEBUG(D_INFO, "req->rq_repmsg->status is %d\n", err);
                /* XXX: translate this error from net to host */
                RETURN(err);
        }

        RETURN(0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and cleanup any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

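/* Requests on c_sending_head that the server has both replied to and
 * committed (rq_transno <= c_last_committed, as reported in each reply)
 * drop their "uncommitted" reference here; requests flagged FL_REPLAY are
 * kept for replay regardless. */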
/* caller must hold conn->c_lock */
void ptlrpc_free_committed(struct ptlrpc_connection *conn)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;

restart:
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                        DEBUG_REQ(D_HA, req, "keeping (in-flight)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > conn->c_last_committed)
                        break;

                DEBUG_REQ(D_HA, req, "committing (last_committed %Lu)",
                          (long long)conn->c_last_committed);
                if (atomic_dec_and_test(&req->rq_refcount)) {
                        /* We do this to prevent free_req deadlock.
                         * Restarting after each removal is not so bad, as
                         * we are almost always deleting the first item in
                         * the list.
                         *
                         * If we use a recursive lock here, we can skip the
                         * unlock/lock/restart sequence.
                         */
                        spin_unlock(&conn->c_lock);
                        ptlrpc_free_req(req);
                        spin_lock(&conn->c_lock);
                        goto restart;
                } else {
                        list_del(&req->rq_list);
                        list_add(&req->rq_list, &conn->c_dying_head);
                }
        }

        EXIT;
        return;
}

void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        LASSERT(conn);

restart1:
        spin_lock(&conn->c_lock);
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                /* XXX we should make sure that nobody's sleeping on these! */
                DEBUG_REQ(D_HA, req, "cleaning up from sending list");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                goto restart1;
        }
restart2:
        list_for_each_safe(tmp, saved, &conn->c_dying_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                DEBUG_REQ(D_ERROR, req, "on dying list at cleanup");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                spin_lock(&conn->c_lock);
                goto restart2;
        }
        spin_unlock(&conn->c_lock);

        EXIT;
        return;
}

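/* The three helpers below are called during recovery to nudge requests
 * sleeping in ptlrpc_queue_wait(): ptlrpc_continue_req() releases a delayed
 * request, ptlrpc_resend_req() forces a resend, and ptlrpc_restart_req()
 * aborts the wait with -ERESTARTSYS. */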
void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "continue delayed request "LPD64" opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "resend request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "restart completed request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RECOVERY;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

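/* Timeout callback for the l_wait_event() in ptlrpc_queue_wait(): marks the
 * request as timed out and kicks connection recovery.  Returning 1 means
 * "don't go back to sleep"; returning 0 resumes the wait until recovery
 * completes the request. */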
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;

        ENTRY;
        if (!req) {
                CERROR("NULL req!\n");
                LBUG();
                RETURN(0);
        }

        DEBUG_REQ(D_ERROR, req, "timeout");
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;

        if (!req->rq_import) {
                DEBUG_REQ(D_ERROR, req, "NULL import");
                LBUG();
                RETURN(0);
        }

        if (!req->rq_import->imp_connection) {
                DEBUG_REQ(D_ERROR, req, "NULL connection");
                LBUG();
                RETURN(0);
        }

        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        req->rq_timeout = 0;
        req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep.
         */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);
        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

/* If we're being torn down by umount -f, or the import has been
 * invalidated (such as by an OST failure), the request must fail with
 * -EIO.
 *
 * Must be called with conn->c_lock held, will drop it if it returns -EIO.
 *
 * XXX this should just be testing the import, and umount_begin shouldn't touch
 * XXX the connection.
 */
#define EIO_IF_INVALID(conn, req)                                             \
if ((conn->c_flags & CONN_INVALID) ||                                         \
    (req->rq_import->imp_flags & IMP_INVALID)) {                              \
        DEBUG_REQ(D_ERROR, req, "%s_INVALID:",                                \
                  (conn->c_flags & CONN_INVALID) ? "CONN" : "IMP");           \
        spin_unlock(&conn->c_lock);                                           \
        RETURN(-EIO);                                                         \
}

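/* Send a request and sleep until the reply arrives.  If the request's level
 * is above the connection's, it waits on c_delayed_head until recovery
 * raises the level.  Otherwise the request goes on c_sending_head, is handed
 * to ptl_send_rpc(), and the caller sleeps on ptlrpc_check_reply() until the
 * request is replied to, resent, interrupted, or timed out; afterwards,
 * commit bookkeeping is updated from the reply. */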
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct ptlrpc_connection *conn = req->rq_import->imp_connection;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        //        DEBUG_REQ(D_HA, req, "subsys: %s:", cli->cli_name);

        /* XXX probably both an import and connection level are needed */
        if (req->rq_level > conn->c_level) {
                spin_lock(&conn->c_lock);
                EIO_IF_INVALID(conn, req);
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &conn->c_delayed_head);
                spin_unlock(&conn->c_lock);

                DEBUG_REQ(D_HA, req, "waiting for recovery: (%d > %d)",
                          req->rq_level, conn->c_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= conn->c_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                spin_lock(&conn->c_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        RETURN(-EIO);

                if (rc)
                        RETURN(rc);

                CERROR("process %d resumed\n", current->pid);
        }
 resend:
        req->rq_timeout = obd_timeout;
        spin_lock(&conn->c_lock);
        EIO_IF_INVALID(conn, req);

        list_del(&req->rq_list);
        list_add_tail(&req->rq_list, &conn->c_sending_head);
        spin_unlock(&conn->c_lock);
        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* the sleep below will time out, triggering recovery */
        }

        DEBUG_REQ(D_NET, req, "-- sleeping");
        lwi = LWI_TIMEOUT_INTR(req->rq_timeout * HZ, expired_request,
                               interrupted_request, req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        DEBUG_REQ(D_NET, req, "-- done sleeping");

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                ptlrpc_abort(req);
                GOTO(out, rc = -EIO);
        }

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                DEBUG_REQ(D_HA, req, "resending: ");
                goto resend;
        }

        // up(&cli->cli_rpc_sem);
        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                LBUG();
                GOTO(out, rc = -EINVAL);
        }
#endif
        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        spin_lock(&conn->c_lock);

        /* Requests that aren't from replayable imports, or which don't have
         * transno information, can be "committed" early.
         */

        if ((req->rq_import->imp_flags & IMP_REPLAYABLE) == 0 ||
            req->rq_repmsg->transno == 0) {
                /* This import doesn't support replay, so we can just "commit"
                 * this request now.
                 */
                DEBUG_REQ(D_HA, req, "not replayable, committing:");
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req); /* Must be called unlocked. */
                spin_lock(&conn->c_lock);
        }

        /* Replay-enabled imports return commit-status information. */
        if (req->rq_import->imp_flags & IMP_REPLAYABLE) {
                /* XXX this needs to be per-import, or multiple MDS services on
                 * XXX the same system are going to interfere messily with each
                 * XXX others' transno spaces.
                 */
                conn->c_last_xid = req->rq_repmsg->last_xid;
                conn->c_last_committed = req->rq_repmsg->last_committed;
                ptlrpc_free_committed(conn);
        }

        spin_unlock(&conn->c_lock);

        EXIT;
 out:
        return rc;
}

#undef EIO_IF_INVALID

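/* Resend a preserved request during recovery.  The request is temporarily
 * raised to RECOVD level so its reply isn't ignored, the replay callback
 * gets a chance to fix up state (possibly the request itself), and any
 * change in reply status from the original execution is logged. */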
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        DEBUG_REQ(D_NET, req, "");

        req->rq_timeout = obd_timeout;
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_flags & PTL_RPC_FL_REPLIED)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc = -rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly to the request itself */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if ((req->rq_flags & PTL_RPC_FL_REPLIED) &&
            req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}