lustre/ptlrpc/client.c (fs/lustre-release.git)
Commit: Add D_HA for recovery diagnostics, and use it in a handful of places.
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

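/*
 * Client-side PTLRPC: allocation and teardown of requests and bulk
 * descriptors, synchronous send-and-wait (ptlrpc_queue_wait), replay of
 * saved requests (ptlrpc_replay_req), and cleanup of requests whose
 * transactions the server has committed (ptlrpc_free_committed).
 */
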
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}

__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}

struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c) {
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));
                c->c_epoch++;
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid, c);

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,obd_uuid_t uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
        return;
}

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc != NULL) {
                desc->bd_connection = ptlrpc_connection_addref(conn);
                atomic_set(&desc->bd_refcount, 1);
                init_waitqueue_head(&desc->bd_waitq);
                INIT_LIST_HEAD(&desc->bd_page_list);
                ptl_set_inv_handle(&desc->bd_md_h);
                ptl_set_inv_handle(&desc->bd_me_h);
        }

        return desc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->bp_desc = desc;
                list_add_tail(&bulk->bp_link, &desc->bd_page_list);
                desc->bd_page_count++;
        }
        return bulk;
}
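
/*
 * Usage note (descriptive only): a bulk transfer is set up by calling
 * ptlrpc_prep_bulk() on a connection, which takes a connection reference,
 * and then ptlrpc_prep_bulk_page() once per page; each page is linked onto
 * bd_page_list and counted in bd_page_count.  The caller fills in the
 * per-page buffer fields itself.  ptlrpc_free_bulk() below undoes all of
 * this: it frees every page on the list and drops the connection reference.
 */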

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;
        if (desc == NULL) {
                EXIT;
                return;
        }

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        ENTRY;
        if (bulk == NULL) {
                EXIT;
                return;
        }

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
        EXIT;
}

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn = imp->imp_connection;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;
        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        /*
         * This will be reduced once when the sender is finished (waiting for
         * reply, f.e.), once when the request has been committed and is
         * removed from the to-be-committed list, and once when portals is
         * finished with it and has called request_out_callback.
         *
         * (Except in the DLM server case, where it will be dropped twice
         * by the sender, and then the last time by request_out_callback.)
         */
        atomic_set(&request->rq_refcount, 3);

        spin_lock(&conn->c_lock);
        request->rq_xid = HTON__u32(++conn->c_xid_out);
        spin_unlock(&conn->c_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}
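
/*
 * Illustrative sketch (added; not part of the original source): a typical
 * synchronous caller pairs ptlrpc_prep_req() with ptlrpc_queue_wait(),
 * ptlrpc_check_status() and ptlrpc_req_finished() below.  The opcode
 * OST_GETATTR, struct ost_body, lustre_msg_buf() and lustre_msg_size()
 * are assumed from elsewhere in the tree and shown only for flavour.
 */
#if 0
static int example_getattr(struct obd_import *imp, struct obdo *oa)
{
        int size = sizeof(struct ost_body);
        struct ptlrpc_request *req;
        struct ost_body *body;
        int rc;

        req = ptlrpc_prep_req(imp, OST_GETATTR, 1, &size, NULL);
        if (req == NULL)
                return -ENOMEM;

        /* pack the request body and tell ptlrpc how big the reply will be */
        body = lustre_msg_buf(req->rq_reqmsg, 0);
        memcpy(&body->oa, oa, sizeof(*oa));
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        rc = ptlrpc_check_status(req, rc);
        if (rc == 0) {
                body = lustre_msg_buf(req->rq_repmsg, 0);
                memcpy(oa, &body->oa, sizeof(*oa));
        }

        ptlrpc_req_finished(req);
        return rc;
}
#endif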

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }

        if (atomic_dec_and_test(&request->rq_refcount))
                ptlrpc_free_req(request);
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (request->rq_repmsg != NULL)
                OBD_FREE(request->rq_repmsg, request->rq_replen);
        if (request->rq_reqmsg != NULL)
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);

        if (request->rq_connection) {
                spin_lock(&request->rq_connection->c_lock);
                list_del_init(&request->rq_list);
                spin_unlock(&request->rq_connection->c_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repmsg != NULL) {
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                CERROR("-- RESTART --\n");
                GOTO(out, rc = 1);
        }

 out:
        CDEBUG(D_NET, "req = %p, rc = %d\n", req, rc);
        return rc;
}

int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-ENOMEM);
        }

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
                RETURN(err ? err : -EINVAL);
        }

        if (err != 0) {
                if (err < 0)
                        CERROR("req->rq_repmsg->status is %d\n", err);
                else
                        CDEBUG(D_INFO, "req->rq_repmsg->status is %d\n", err);
                /* XXX: translate this error from net to host */
                RETURN(err);
        }

        RETURN(0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and cleanup any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

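/*
 * Free requests whose transactions the server has committed.  The cutoff,
 * conn->c_last_committed, is refreshed by ptlrpc_queue_wait() from the
 * last_committed field of each reply.  Requests flagged PTL_RPC_FL_REPLAY,
 * and requests that have not yet seen a reply, are kept for replay;
 * committed requests either lose their last reference here or are moved
 * to conn->c_dying_head.
 */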
/* caller must hold conn->c_lock */
void ptlrpc_free_committed(struct ptlrpc_connection *conn)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;

restart:
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        CDEBUG(D_INFO, "Keeping req %p xid "LPD64" for replay\n",
                               req, req->rq_xid);
                        continue;
                }

                if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                        CDEBUG(D_INFO, "Keeping in-flight req %p xid "LPD64
                               " for replay\n", req, req->rq_xid);
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > conn->c_last_committed)
                        break;

                CDEBUG(D_INFO, "Marking request %p xid %Ld as committed "
                       "transno=%Lu, last_committed=%Lu\n", req,
                       (long long)req->rq_xid, (long long)req->rq_transno,
                       (long long)conn->c_last_committed);
                if (atomic_dec_and_test(&req->rq_refcount)) {
                        req->rq_import = NULL;

                        /* We do this to prevent free_req deadlock.  Restarting
                         * after each removal is not so bad, as we are almost
                         * always deleting the first item in the list.
                         */
                        spin_unlock(&conn->c_lock);
                        ptlrpc_free_req(req);
                        spin_lock(&conn->c_lock);
                        goto restart;
                } else {
                        list_del(&req->rq_list);
                        list_add(&req->rq_list, &conn->c_dying_head);
                }
        }

        EXIT;
        return;
}

void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        LASSERT(conn);

restart1:
        spin_lock(&conn->c_lock);
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                /* XXX we should make sure that nobody's sleeping on these! */
                CDEBUG(D_INFO, "Cleaning req %p from sending list.\n", req);
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_free_req(req);
                goto restart1;
        }
restart2:
        list_for_each_safe(tmp, saved, &conn->c_dying_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                CERROR("Request %p is on the dying list at cleanup!\n", req);
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_free_req(req);
                spin_lock(&conn->c_lock);
                goto restart2;
        }
        spin_unlock(&conn->c_lock);

        EXIT;
        return;
}

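/*
 * The next three helpers are wakeup hooks used by recovery: each one
 * adjusts a delayed or in-flight request and then wakes the thread
 * sleeping on rq_wait_for_rep in ptlrpc_queue_wait() below (or in
 * ptlrpc_replay_req), whose wait condition is ptlrpc_check_reply() or,
 * for delayed requests, the connection-level test.
 */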
void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "continue delayed request "LPD64" opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "resend request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "restart completed request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RECOVERY;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;

        ENTRY;
        CDEBUG(D_HA, "req xid "LPD64" op %d: timeout on conn to %s:%d\n",
               (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
               req->rq_connection->c_remote_uuid,
               req->rq_import->imp_client->cli_request_portal);
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;
        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        req->rq_timeout = 0;
        req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep.
         */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);
        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

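/*
 * expired_request() and interrupted_request() above are the timeout and
 * interrupt callbacks handed to LWI_TIMEOUT_INTR() in ptlrpc_queue_wait():
 * returning non-zero from the timeout callback means the waiter does not
 * go back to sleep.
 */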
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct ptlrpc_connection *conn = req->rq_import->imp_connection;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "subsys: %s req "LPD64" opc %d level %d, conn level %d\n",
               cli->cli_name, req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        /* XXX probably both an import and connection level are needed */
        if (req->rq_level > conn->c_level) {
                spin_lock(&conn->c_lock);
                if (conn->c_flags & CONN_INVALID) {
                        /* being torn down by "umount -f" */
                        CERROR("req xid "LPD64" op %d to %s:%d: CONN_INVALID\n",
                               (unsigned long long)req->rq_xid,
                               req->rq_reqmsg->opc,
                               req->rq_connection->c_remote_uuid,
                               req->rq_import->imp_client->cli_request_portal);
                        spin_unlock(&conn->c_lock);
                        RETURN(-EIO);
                }
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &conn->c_delayed_head);
                spin_unlock(&conn->c_lock);

                CDEBUG(D_HA, "req xid "LPD64" op %d to %s:%d: waiting for "
                       "recovery (%d < %d)\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal,
                       req->rq_level, conn->c_level);

                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= conn->c_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                spin_lock(&conn->c_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        RETURN(-EIO);

                if (rc)
                        RETURN(rc);

                CDEBUG(D_HA, "process %d resumed\n", current->pid);
        }
 resend:
        req->rq_timeout = obd_timeout;
        spin_lock(&conn->c_lock);
        if (conn->c_flags & CONN_INVALID) {
                CERROR("req xid "LPD64" op %d to %s:%d: CONN_INVALID\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal);
                spin_unlock(&conn->c_lock); /* being torn down by "umount -f" */
                RETURN(-EIO);
        }

        list_del(&req->rq_list);
        list_add_tail(&req->rq_list, &conn->c_sending_head);
        spin_unlock(&conn->c_lock);
        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* the sleep below will time out, triggering recovery */
        }

        CDEBUG(D_NET, "-- sleeping on req xid "LPD64" op %d to %s:%d\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal);
        lwi = LWI_TIMEOUT_INTR(req->rq_timeout * HZ, expired_request,
                               interrupted_request,req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_NET, "-- done sleeping on req xid "LPD64" op %d to %s:%d\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal);

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                CDEBUG(D_HA, "req xid "LPD64" op %d to %s:%d\n",
                       (unsigned long long)req->rq_xid, req->rq_reqmsg->opc,
                       req->rq_connection->c_remote_uuid,
                       req->rq_import->imp_client->cli_request_portal);
                /* we'll get sent again, so balance 2nd request_out_callback */
                atomic_inc(&req->rq_refcount);
                goto resend;
        }

        // up(&cli->cli_rpc_sem);
        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                LBUG();
                GOTO(out, rc = -EINVAL);
        }
#endif
        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        spin_lock(&conn->c_lock);
        conn->c_last_xid = req->rq_repmsg->last_xid;
        conn->c_last_committed = req->rq_repmsg->last_committed;
        ptlrpc_free_committed(conn);
        spin_unlock(&conn->c_lock);

        EXIT;
 out:
        return rc;
}

int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        CDEBUG(D_NET, "req "LPD64" opc %d level %d, conn level %d\n",
               req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
               req->rq_connection->c_level);

        req->rq_timeout = obd_timeout;
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* add a ref, which will again be balanced in request_out_callback */
        atomic_inc(&req->rq_refcount);
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                RETURN(-rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);
        else {
                CERROR("recovery failed: ");
                CERROR("req "LPD64" opc %d level %d, conn level %d\n",
                       req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
                       req->rq_connection->c_level);
                LBUG();
        }

        if (req->rq_replay_cb)
                req->rq_replay_cb(req, &req->rq_replay_cb_handle);

 out:
        RETURN(rc);
}