/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>
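
/* Record the request and reply portal numbers (and a name, for logging)
 * that this client will use for all of its RPCs. */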
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal = rep_portal;
        cl->cli_name = name;
}

__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}
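
/* Resolve @uuid to a peer and return a connection to it, stamping the
 * connection with the peer's UUID.  Returns NULL if the peer is unknown. */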
struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c != NULL)
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));

        CDEBUG(D_INFO, "%s -> %p\n", uuid, c);

        return c;
}
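
/* Re-resolve @uuid and repoint @conn at the peer's (possibly new) address,
 * so an existing connection can follow a target that has moved. */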
void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 obd_uuid_t uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
}
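
/* Allocate and initialize a bulk descriptor on @conn (taking a connection
 * reference); pages are attached afterwards with ptlrpc_prep_bulk_page(). */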
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc == NULL)
                return NULL;

        desc->bd_connection = ptlrpc_connection_addref(conn);
        atomic_set(&desc->bd_refcount, 1);
        init_waitqueue_head(&desc->bd_waitq);
        INIT_LIST_HEAD(&desc->bd_page_list);
        INIT_LIST_HEAD(&desc->bd_set_chain);
        ptl_set_inv_handle(&desc->bd_md_h);
        ptl_set_inv_handle(&desc->bd_me_h);

        return desc;
}
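
/* Translate the timeout/interrupt flags on a bulk descriptor into an errno:
 * -ERESTARTSYS if interrupted, -ETIMEDOUT if it simply timed out, else 0. */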
int ptlrpc_bulk_error(struct ptlrpc_bulk_desc *desc)
{
        int rc = 0;

        if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
                rc = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
                      -ETIMEDOUT);
        }
        return rc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk == NULL)
                return NULL;

        bulk->bp_desc = desc;
        list_add_tail(&bulk->bp_link, &desc->bd_page_list);
        desc->bd_page_count++;

        return bulk;
}

void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;

        ENTRY;
        if (desc == NULL) {
                EXIT;
                return;
        }

        LASSERT(list_empty(&desc->bd_set_chain));

        if (atomic_read(&desc->bd_refcount) != 0)
                CERROR("freeing desc %p with refcount %d!\n", desc,
                       atomic_read(&desc->bd_refcount));

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        if (bulk == NULL)
                return;

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
}
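
/* Timeout callback for a synchronous bulk-read/write set: unlink the MDs of
 * any descriptors still in flight so their completions can "never" arrive,
 * and signal connection failure so recovery can begin. */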
static int ll_sync_brw_timeout(void *data)
{
        struct obd_brw_set *set = data;
        struct list_head *tmp;
        int failed = 0;
        ENTRY;

        set->brw_flags |= PTL_RPC_FL_TIMEOUT;

        list_for_each(tmp, &set->brw_desc_head) {
                struct ptlrpc_bulk_desc *desc =
                        list_entry(tmp, struct ptlrpc_bulk_desc, bd_set_chain);

                /* Skip descriptors that were completed successfully. */
                if (desc->bd_flags & (PTL_BULK_FL_RCVD | PTL_BULK_FL_SENT))
                        continue;

                LASSERT(desc->bd_connection);

                /* If PtlMDUnlink succeeds, then it hasn't completed yet.  If
                 * it fails, the bulk finished _just_ in time (after the
                 * timeout fired but before we got this far) and we'll let it
                 * live. */
                if (PtlMDUnlink(desc->bd_md_h) != 0) {
                        CERROR("Near-miss on OST %s -- need to adjust "
                               "obd_timeout?\n",
                               desc->bd_connection->c_remote_uuid);
                        continue;
                }

                CERROR("IO of %d pages to/from %s:%d (conn %p) timed out\n",
                       desc->bd_page_count, desc->bd_connection->c_remote_uuid,
                       desc->bd_portal, desc->bd_connection);

                /* This one will "never" arrive, don't wait for it. */
                if (atomic_dec_and_test(&set->brw_refcount))
                        wake_up(&set->brw_waitq);

                if (class_signal_connection_failure)
                        class_signal_connection_failure(desc->bd_connection);
                else
                        failed = 1;
        }

        /* 0 = We go back to sleep, until we're resumed or interrupted */
        /* 1 = We can't be recovered, just abort the syscall with -ETIMEDOUT */
        RETURN(failed);
}

static int ll_sync_brw_intr(void *data)
{
        struct obd_brw_set *set = data;

        ENTRY;
        set->brw_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}
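
/* Two-phase completion for a synchronous brw set: CB_PHASE_START waits for
 * every descriptor in the set (under obd_timeout) and then drops them from
 * the set's chain; CB_PHASE_FINISH drops one reference and wakes the waiter
 * when it is the last. */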
int ll_brw_sync_wait(struct obd_brw_set *set, int phase)
{
        struct l_wait_info lwi;
        struct list_head *tmp, *next;
        int rc = 0;
        ENTRY;

        switch (phase) {
        case CB_PHASE_START:
                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ll_sync_brw_timeout,
                                       ll_sync_brw_intr, set);
                rc = l_wait_event(set->brw_waitq,
                                  atomic_read(&set->brw_refcount) == 0, &lwi);

                list_for_each_safe(tmp, next, &set->brw_desc_head) {
                        struct ptlrpc_bulk_desc *desc =
                                list_entry(tmp, struct ptlrpc_bulk_desc,
                                           bd_set_chain);
                        list_del_init(&desc->bd_set_chain);
                        ptlrpc_bulk_decref(desc);
                }
                break;
        case CB_PHASE_FINISH:
                if (atomic_dec_and_test(&set->brw_refcount))
                        wake_up(&set->brw_waitq);
                break;
        default:
                LBUG();
        }

        RETURN(rc);
}
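
/* Allocate a request on @imp, pack the message buffers described by
 * @lengths/@bufs, assign the next XID, and fill in the portals and
 * connection the request will use. */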
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        conn = imp->imp_connection;

        OBD_ALLOC(request, sizeof(*request));
        if (request == NULL) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;

        /* XXX FIXME bug 625069 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        atomic_set(&request->rq_refcount, 1);

        spin_lock(&imp->imp_lock);
        request->rq_xid = HTON__u32(++imp->imp_last_xid);
        spin_unlock(&imp->imp_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->flags = 0;

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}
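
/* Free a request and its buffers; @locked says whether the caller already
 * holds the import's imp_lock, so we don't take it again when unlinking the
 * request from its list. */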
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                CERROR("freeing request %p (%d->%s:%d) with refcount %d\n",
                       request, request->rq_reqmsg->opc,
                       request->rq_connection->c_remote_uuid,
                       request->rq_import->imp_client->cli_request_portal,
                       atomic_read(&request->rq_refcount));
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }

        if (request->rq_import) {
                if (!locked)
                        spin_lock(&request->rq_import->imp_lock);
                list_del_init(&request->rq_list);
                if (!locked)
                        spin_unlock(&request->rq_import->imp_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        __ptlrpc_free_req(request, 0);
}

static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL)
                RETURN(1);

        DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);

        if (atomic_dec_and_test(&request->rq_refcount)) {
                __ptlrpc_free_req(request, locked);
                RETURN(1);
        }

        RETURN(0);
}

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        __ptlrpc_req_finished(request, 0);
}
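
/* Wake-up predicate for l_wait_event: a request is "done" once it has a
 * reply, or has been marked for resend, error, or restart. */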
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;
        ENTRY;

        if (req->rq_repmsg != NULL) {
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }
        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                DEBUG_REQ(D_ERROR, req, "RESEND:");
                GOTO(out, rc = 1);
        }
        if (req->rq_flags & PTL_RPC_FL_ERR) {
                DEBUG_REQ(D_ERROR, req, "ABORTED:");
                GOTO(out, rc = 1);
        }
        if (req->rq_flags & PTL_RPC_FL_RESTART) {
                DEBUG_REQ(D_ERROR, req, "RESTART:");
                GOTO(out, rc = 1);
        }
 out:
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        RETURN(rc);
}

static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
                RETURN(err ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_ERROR, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and clean up any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

/* Caller must hold imp->imp_lock. */
void ptlrpc_free_committed(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        ENTRY;

        LASSERT(spin_is_locked(&imp->imp_lock));

        CDEBUG(D_HA, "committing for xid "LPU64", last_committed "LPU64"\n",
               imp->imp_peer_last_xid, imp->imp_peer_committed_transno);

        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                /* Not yet committed. */
                if (req->rq_transno > imp->imp_peer_committed_transno) {
                        DEBUG_REQ(D_HA, req, "stopping search");
                        break;
                }

                DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
                          imp->imp_peer_committed_transno);
                __ptlrpc_req_finished(req, 1);
        }

        EXIT;
}
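
/* Drop every request still on @imp's replay list; used when tearing a
 * client down, so nothing should be left sleeping on these. */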
void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        LASSERT(conn);

        spin_lock(&imp->imp_lock);
        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                /* XXX we should make sure that nobody's sleeping on these! */
                DEBUG_REQ(D_HA, req, "cleaning up from sending list");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                __ptlrpc_req_finished(req, 0);
        }
        spin_unlock(&imp->imp_lock);

        EXIT;
}
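
/* Recovery helpers: wake a request that was delayed waiting for recovery
 * (continue), queue it for another send (resend), or abort it back to the
 * caller with -ERESTARTSYS (restart). */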
void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "continuing delayed request");
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "resending");
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RESTART;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}
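
/* Timer callback for l_wait_event: returning 1 aborts the wait (the caller
 * sees -ETIMEDOUT), returning 0 puts the waiter back to sleep while recovery
 * runs. */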
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        DEBUG_REQ(D_ERROR, req, "timeout");
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;

        if (!req->rq_import) {
                DEBUG_REQ(D_ERROR, req, "NULL import");
                LBUG();
                RETURN(0);
        }

        if (!req->rq_import->imp_connection) {
                DEBUG_REQ(D_ERROR, req, "NULL connection");
                LBUG();
                RETURN(0);
        }

        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep. */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);
        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

/* If the import has been invalidated (such as by an OST failure), the
 * request must fail with -EIO.
 *
 * Must be called with imp_lock held; drops it if it returns -EIO. */
#define EIO_IF_INVALID(req)                                                   \
if (req->rq_import->imp_flags & IMP_INVALID) {                                \
        DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");                              \
        spin_unlock(&imp->imp_lock);                                          \
        RETURN(-EIO);                                                         \
}
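
/* Send @req and wait synchronously for its reply, first waiting for recovery
 * if the import isn't ready for this request's level, then resending or
 * aborting as the flags set by the timeout and interrupt handlers dictate. */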
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);

        /* for distributed debugging */
        req->rq_reqmsg->status = HTON__u32(current->pid);
        CDEBUG(D_RPCTRACE, "Sending RPC pid:xid:nid:opc %d:"LPU64":%x:%d\n",
               NTOH__u32(req->rq_reqmsg->status), req->rq_xid,
               conn->c_peer.peer_nid, NTOH__u32(req->rq_reqmsg->opc));

        if (req->rq_level > imp->imp_level) {
                spin_lock(&imp->imp_lock);
                EIO_IF_INVALID(req);
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock(&imp->imp_lock);

                DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d < %d)",
                          current->comm, req->rq_level, imp->imp_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= imp->imp_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                spin_lock(&imp->imp_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&imp->imp_lock);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        RETURN(-EIO);

                if (rc)
                        RETURN(rc);

                CERROR("process %d resumed\n", current->pid);
        }

 resend:
        req->rq_timeout = obd_timeout;
        spin_lock(&imp->imp_lock);
        EIO_IF_INVALID(req);

        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock(&imp->imp_lock);

        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* sleep for a jiffy, then trigger recovery */
                lwi = LWI_TIMEOUT_INTR(1, expired_request,
                                       interrupted_request, req);
        } else {
                DEBUG_REQ(D_NET, req, "-- sleeping");
                lwi = LWI_TIMEOUT_INTR(req->rq_timeout * HZ, expired_request,
                                       interrupted_request, req);
        }
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        DEBUG_REQ(D_NET, req, "-- done sleeping");

        spin_lock(&imp->imp_lock);
        list_del_init(&req->rq_list);
        spin_unlock(&imp->imp_lock);

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                ptlrpc_abort(req);
                GOTO(out, rc = -EIO);
        }

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
                DEBUG_REQ(D_HA, req, "resending: ");
                goto resend;
        }

        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                LBUG();
                GOTO(out, rc = -EINVAL);
        }
#endif

        CDEBUG(D_NET, "got rep "LPU64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        if (req->rq_import->imp_flags & IMP_REPLAYABLE) {
                spin_lock(&imp->imp_lock);
                if (req->rq_flags & PTL_RPC_FL_REPLAY || req->rq_transno != 0) {
                        /* Balanced in ptlrpc_free_committed, usually. */
                        atomic_inc(&req->rq_refcount);
                        list_add_tail(&req->rq_list, &imp->imp_replay_list);
                }

                if (req->rq_transno > imp->imp_max_transno) {
                        imp->imp_max_transno = req->rq_transno;
                } else if (req->rq_transno != 0 &&
                           imp->imp_level == LUSTRE_CONN_FULL) {
                        CDEBUG(D_HA, "got transno "LPD64" after "LPD64
                               ": recovery may not work\n", req->rq_transno,
                               imp->imp_max_transno);
                }

                /* Replay-enabled imports return commit-status information. */
                imp->imp_peer_last_xid = req->rq_repmsg->last_xid;
                imp->imp_peer_committed_transno =
                        req->rq_repmsg->last_committed;
                ptlrpc_free_committed(imp);
                spin_unlock(&imp->imp_lock);
        }

        rc = ptlrpc_check_status(req);

        EXIT;
 out:
        return rc;
}

#undef EIO_IF_INVALID
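
/* Resend a request during replay, at RECOVD level so it isn't blocked by the
 * very recovery it is part of, and compare the new reply's status against the
 * one saved from the original send. */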
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        DEBUG_REQ(D_NET, req, "");

        req->rq_timeout = obd_timeout;
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_flags & PTL_RPC_FL_REPLIED)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if ((req->rq_flags & PTL_RPC_FL_REPLIED) &&
            req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}