/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>
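
/* Record the portals this client sends requests on and receives replies
 * on, and the name used to identify it in debug messages. */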
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal = rep_portal;
        cl->cli_name = name;
}

struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return &req->rq_connection->c_remote_uuid;
}
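
/* Look up the peer for @uuid and return a referenced connection to it,
 * recording the remote UUID in the connection.  Returns NULL if the peer
 * is unknown to the portals layer. */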
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid->uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c != NULL)
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}
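
/* Re-resolve @uuid and repoint @conn at the resulting peer address, for
 * use when a service comes back at a different network location. */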
void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 struct obd_uuid *uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid->uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
}
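
/* Allocate a bulk I/O descriptor tied to @conn: takes a connection
 * reference and initializes the page list, set chain, waitqueue, and
 * Portals MD/ME handles (to invalid). */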
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc != NULL) {
                desc->bd_connection = ptlrpc_connection_addref(conn);
                atomic_set(&desc->bd_refcount, 1);
                init_waitqueue_head(&desc->bd_waitq);
                INIT_LIST_HEAD(&desc->bd_page_list);
                INIT_LIST_HEAD(&desc->bd_set_chain);
                ptl_set_inv_handle(&desc->bd_md_h);
                ptl_set_inv_handle(&desc->bd_me_h);
        }

        return desc;
}
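
/* Map the completion flags of @desc onto an errno: -ERESTARTSYS if the
 * timed-out bulk was also interrupted, -ETIMEDOUT for a plain timeout,
 * 0 on success. */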
int ptlrpc_bulk_error(struct ptlrpc_bulk_desc *desc)
{
        int rc = 0;

        if (desc->bd_flags & PTL_RPC_FL_TIMEOUT) {
                rc = (desc->bd_flags & PTL_RPC_FL_INTR ? -ERESTARTSYS :
                      -ETIMEDOUT);
        }
        return rc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->bp_desc = desc;
                list_add_tail(&bulk->bp_link, &desc->bd_page_list);
                desc->bd_page_count++;
        }
        return bulk;
}
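
/* Tear down @desc: free every page still on its list, drop the connection
 * reference taken in ptlrpc_prep_bulk, and free the descriptor itself.
 * The descriptor must already be off any brw set chain. */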
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;

        if (desc == NULL)
                return;

        LASSERT(list_empty(&desc->bd_set_chain));

        if (atomic_read(&desc->bd_refcount) != 0)
                CERROR("freeing desc %p with refcount %d!\n", desc,
                       atomic_read(&desc->bd_refcount));

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        if (bulk == NULL)
                return;

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
}
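
/* Timeout callback for ll_brw_sync_wait: flag the set as timed out and,
 * for each descriptor still in flight, unlink its MD, drop it from the
 * set's wait count, and report the connection failure so recovery can
 * start.  The return value is explained at the bottom. */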
static int ll_sync_brw_timeout(void *data)
{
        struct obd_brw_set *set = data;
        struct list_head *tmp;
        int failed = 0;
        ENTRY;

        set->brw_flags |= PTL_RPC_FL_TIMEOUT;

        list_for_each(tmp, &set->brw_desc_head) {
                struct ptlrpc_bulk_desc *desc =
                        list_entry(tmp, struct ptlrpc_bulk_desc, bd_set_chain);

                /* Skip descriptors that were completed successfully. */
                if (desc->bd_flags & (PTL_BULK_FL_RCVD | PTL_BULK_FL_SENT))
                        continue;

                LASSERT(desc->bd_connection);

                /* If PtlMDUnlink succeeds, then it hasn't completed yet.  If
                 * it fails, the bulk finished _just_ in time (after the
                 * timeout fired but before we got this far) and we'll let it
                 * live. */
                if (PtlMDUnlink(desc->bd_md_h) != 0) {
                        CERROR("Near-miss on OST %s -- need to adjust "
                               "obd_timeout?\n",
                               desc->bd_connection->c_remote_uuid.uuid);
                        continue;
                }

                CERROR("IO of %d pages to/from %s:%d (conn %p) timed out\n",
                       desc->bd_page_count,
                       desc->bd_connection->c_remote_uuid.uuid,
                       desc->bd_portal, desc->bd_connection);

                /* This one will "never" arrive, don't wait for it. */
                if (atomic_dec_and_test(&set->brw_refcount))
                        wake_up(&set->brw_waitq);

                if (class_signal_connection_failure)
                        class_signal_connection_failure(desc->bd_connection);
                else
                        failed = 1;
        }

        /* 0 = We go back to sleep, until we're resumed or interrupted */
        /* 1 = We can't be recovered, just abort the syscall with -ETIMEDOUT */
        RETURN(failed);
}

static int ll_sync_brw_intr(void *data)
{
        struct obd_brw_set *set = data;

        ENTRY;
        set->brw_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}
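
/* Two-phase helper for synchronous bulk I/O.  CB_PHASE_START sleeps (up
 * to obd_timeout) until every bulk in @set completes, then unchains and
 * releases the descriptors; CB_PHASE_FINISH drops one reference as each
 * bulk completes and wakes the waiter on the last one. */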
int ll_brw_sync_wait(struct obd_brw_set *set, int phase)
{
        struct l_wait_info lwi;
        struct list_head *tmp, *next;
        int rc = 0;
        ENTRY;

        switch (phase) {
        case CB_PHASE_START:
                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, ll_sync_brw_timeout,
                                       ll_sync_brw_intr, set);
                rc = l_wait_event(set->brw_waitq,
                                  atomic_read(&set->brw_refcount) == 0, &lwi);

                list_for_each_safe(tmp, next, &set->brw_desc_head) {
                        struct ptlrpc_bulk_desc *desc =
                                list_entry(tmp, struct ptlrpc_bulk_desc,
                                           bd_set_chain);
                        list_del_init(&desc->bd_set_chain);
                        ptlrpc_bulk_decref(desc);
                }
                break;
        case CB_PHASE_FINISH:
                if (atomic_dec_and_test(&set->brw_refcount))
                        wake_up(&set->brw_waitq);
                break;
        default:
                LBUG();
        }

        RETURN(rc);
}
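
/* Allocate a request for @imp and pack its message from @count buffers
 * with the given @lengths (and contents @bufs, if non-NULL).  Returns the
 * request holding a single reference, ready for ptlrpc_queue_wait, or
 * NULL on allocation/packing failure. */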
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        LASSERT((unsigned long)imp > 0x1000);
        conn = imp->imp_connection;

        OBD_ALLOC(request, sizeof(*request));
        if (request == NULL) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;

        /* XXX FIXME bug 625069, now 249 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);
        atomic_set(&request->rq_refcount, 1);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->flags = 0;

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}
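
/* Release everything @request holds: reply and request buffers, import
 * list membership, and the connection reference.  @locked says the caller
 * already holds imp_lock (as ptlrpc_free_committed does), so it must not
 * be taken again here. */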
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                CERROR("freeing request %p (%d->%s:%d) with refcount %d\n",
                       request, request->rq_reqmsg->opc,
                       request->rq_connection->c_remote_uuid.uuid,
                       request->rq_import->imp_client->cli_request_portal,
                       atomic_read(&request->rq_refcount));
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }

        if (request->rq_import) {
                unsigned long flags = 0;
                if (!locked)
                        spin_lock_irqsave(&request->rq_import->imp_lock,
                                          flags);
                list_del_init(&request->rq_list);
                if (!locked)
                        spin_unlock_irqrestore(&request->rq_import->imp_lock,
                                               flags);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        __ptlrpc_free_req(request, 0);
}

static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL)
                RETURN(1);

        if (request == (void *)(long)(0x5a5a5a5a5a5a5a5a)) {
                CERROR("dereferencing freed request (bug 575)\n");
                LBUG();
        }

        DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);

        if (atomic_dec_and_test(&request->rq_refcount)) {
                __ptlrpc_free_req(request, locked);
                RETURN(1);
        }

        RETURN(0);
}

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        __ptlrpc_req_finished(request, 0);
}
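
/* Wait condition for ptlrpc_queue_wait: nonzero once a reply has arrived
 * (recording the transno in the request for replay) or the request has
 * been flagged for resend, abort, or restart. */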
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;
        ENTRY;

        if (req->rq_repmsg != NULL) {
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                /* Store transno in reqmsg for replay. */
                req->rq_reqmsg->transno = req->rq_repmsg->transno;
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                DEBUG_REQ(D_ERROR, req, "RESEND:");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                DEBUG_REQ(D_ERROR, req, "ABORTED:");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESTART) {
                DEBUG_REQ(D_ERROR, req, "RESTART:");
                GOTO(out, rc = 1);
        }
        EXIT;
 out:
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        return rc;
}
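
/* Turn the status carried in @req's reply into a return code; a reply of
 * type PTL_RPC_MSG_ERR is an error even if its status field is zero. */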
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
                RETURN(err ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and cleanup any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        if (PtlMEUnlink(request->rq_reply_me_h) != PTL_OK)
                RETURN(-1);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        memset(&request->rq_reply_me_h, 0, sizeof(request->rq_reply_me_h));
        request->rq_reply_md.start = NULL;
        request->rq_repmsg = NULL;
        RETURN(0);
}

/* caller must hold imp->imp_lock */
void ptlrpc_free_committed(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        ENTRY;

        LASSERT(imp != NULL);

#ifdef CONFIG_SMP
        LASSERT(spin_is_locked(&imp->imp_lock));
#endif

        CDEBUG(D_HA, "committing for last_committed "LPU64"\n",
               imp->imp_peer_committed_transno);

        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > imp->imp_peer_committed_transno) {
                        DEBUG_REQ(D_HA, req, "stopping search");
                        break;
                }

                DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
                          imp->imp_peer_committed_transno);
                list_del_init(&req->rq_list);
                __ptlrpc_req_finished(req, 1);
        }

        EXIT;
}
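
/* Finish every request still retained on @imp's replay list.  Used when
 * tearing down the import; nothing should be sleeping on these. */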
void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        unsigned long flags;
        ENTRY;

        LASSERT(conn);

        spin_lock_irqsave(&imp->imp_lock, flags);
        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                /* XXX we should make sure that nobody's sleeping on these! */
                DEBUG_REQ(D_HA, req, "cleaning up from sending list");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                __ptlrpc_req_finished(req, 0);
        }
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        EXIT;
}

void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "continuing delayed request");
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "resending");
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RESTART;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}
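
/* Timeout callback for ptlrpc_queue_wait.  Marks the request as timed out
 * and triggers connection-level recovery.  Returning 0 sends the waiter
 * back to sleep until recovery wakes it; returning 1 aborts the wait with
 * -ETIMEDOUT. */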
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        DEBUG_REQ(D_ERROR, req, "timeout");
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;

        if (!req->rq_import) {
                DEBUG_REQ(D_HA, req, "NULL import; already cleaned up?");
                RETURN(1);
        }

        if (!req->rq_import->imp_connection) {
                DEBUG_REQ(D_ERROR, req, "NULL connection");
                RETURN(1);
        }

        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep. */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);

        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
        ENTRY;
        atomic_inc(&req->rq_refcount);
        RETURN(req);
}
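
/* Insert @req into @imp's replay list, sorted by transno with xid as a
 * secondary key so replay proceeds in original execution order.  Takes a
 * reference that is normally dropped by ptlrpc_free_committed. */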
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp)
{
        struct list_head *tmp;

#ifdef CONFIG_SMP
        LASSERT(spin_is_locked(&imp->imp_lock));
#endif

        LASSERT(imp->imp_flags & IMP_REPLAYABLE);
        /* Balanced in ptlrpc_free_committed, usually. */
        ptlrpc_request_addref(req);
        list_for_each_prev(tmp, &imp->imp_replay_list) {
                struct ptlrpc_request *iter =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                /* We may have duplicate transnos if we create and then
                 * open a file, or for closes retained to match creating
                 * opens, so use req->rq_xid as a secondary key.
                 * (See bugs 684, 685, and 428.) */
                if (iter->rq_transno > req->rq_transno)
                        continue;

                if (iter->rq_transno == req->rq_transno) {
                        LASSERT(iter->rq_xid != req->rq_xid);
                        if (iter->rq_xid > req->rq_xid)
                                continue;
                }

                list_add(&req->rq_list, &iter->rq_list);
                return;
        }

        list_add_tail(&req->rq_list, &imp->imp_replay_list);
}
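
/* Send @req and sleep until the reply arrives or recovery intervenes.
 * Requests above the import's connection level wait on the delayed list;
 * timeouts kick recovery, and un-interrupted requests are resent when the
 * connection comes back.  A typical caller looks like this (sketch only;
 * the opcode and buffer shape are illustrative):
 *
 *      req = ptlrpc_prep_req(imp, OST_GETATTR, 1, &size, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */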
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_connection *conn = imp->imp_connection;
        unsigned long flags;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);

        spin_lock_irqsave(&imp->imp_lock, flags);
        req->rq_xid = HTON__u32(++imp->imp_last_xid);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        /* for distributed debugging */
        req->rq_reqmsg->status = HTON__u32(current->pid);
        CDEBUG(D_RPCTRACE, "Sending RPC pid:xid:nid:opc %d:"LPU64":%x:%d\n",
               NTOH__u32(req->rq_reqmsg->status), req->rq_xid,
               conn->c_peer.peer_nid, NTOH__u32(req->rq_reqmsg->opc));

        spin_lock_irqsave(&imp->imp_lock, flags);

        /*
         * If the import has been invalidated (such as by an OST failure), the
         * request must fail with -EIO.
         */
        if (req->rq_import->imp_flags & IMP_INVALID) {
                DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                RETURN(-EIO);
        }

        if (req->rq_level > imp->imp_level) {
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d < %d)",
                          current->comm, req->rq_level, imp->imp_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= imp->imp_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        rc = -EIO;

                if (!req->rq_import)
                        RETURN(rc);

                spin_lock_irqsave(&imp->imp_lock, flags);
                list_del_init(&req->rq_list);

                if (rc) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        RETURN(rc);
                }

                CERROR("process %d resumed\n", current->pid);
        }

 resend:
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);
        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* sleep for a jiffy, then trigger recovery */
                lwi = LWI_TIMEOUT_INTR(1, expired_request,
                                       interrupted_request, req);
        } else {
                DEBUG_REQ(D_NET, req, "-- sleeping");
                lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ, expired_request,
                                       interrupted_request, req);
        }
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        DEBUG_REQ(D_NET, req, "-- done sleeping");

        spin_lock_irqsave(&imp->imp_lock, flags);
        list_del_init(&req->rq_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                ptlrpc_abort(req);
                GOTO(out, rc = -EIO);
        }

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
                DEBUG_REQ(D_HA, req, "resending: ");
                spin_lock_irqsave(&imp->imp_lock, flags);
                goto resend;
        }

        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc = -EPROTO);
        }
#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                GOTO(out, rc = -EINVAL);
        }
#endif

        DEBUG_REQ(D_NET, req, "status %d", req->rq_repmsg->status);

        /* We're a rejected connection, need to invalidate and rebuild. */
        if (req->rq_repmsg->status == -ENOTCONN) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                /* If someone else is reconnecting us (CONN_RECOVD) or has
                 * already completed it (handle mismatch), then we just need
                 * to get out. */
                if (imp->imp_level == LUSTRE_CONN_RECOVD ||
                    imp->imp_handle.addr != req->rq_reqmsg->addr ||
                    imp->imp_handle.cookie != req->rq_reqmsg->cookie) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc = -EIO);
                }

                imp->imp_level = LUSTRE_CONN_RECOVD;
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                rc = imp->imp_recover(imp, PTLRPC_RECOVD_PHASE_NOTCONN);
                if (rc)
                        LBUG();
                GOTO(out, rc = -EIO);
        }

        rc = ptlrpc_check_status(req);

        if (req->rq_import->imp_flags & IMP_REPLAYABLE) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                if ((req->rq_flags & PTL_RPC_FL_REPLAY || req->rq_transno != 0)
                    && rc >= 0)
                        ptlrpc_retain_replayable_request(req, imp);

                if (req->rq_transno > imp->imp_max_transno)
                        imp->imp_max_transno = req->rq_transno;

                /* Replay-enabled imports return commit-status information. */
                if (req->rq_repmsg->last_committed)
                        imp->imp_peer_committed_transno =
                                req->rq_repmsg->last_committed;
                ptlrpc_free_committed(imp);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
        }

        EXIT;
 out:
        return rc;
}
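
/* Resend a request preserved for replay: temporarily drop it to RECOVD
 * level so it passes the import-level check, send it, wait for the reply,
 * and let rq_replay_cb fix up request/reply state before restoring the
 * original level at out:. */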
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        DEBUG_REQ(D_NET, req, "");

        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_flags & PTL_RPC_FL_REPLIED)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc = -EPROTO);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if ((req->rq_flags & PTL_RPC_FL_REPLIED) &&
            req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}

/* XXX looks a lot like super.c:invalidate_request_list, don't it? */
void ptlrpc_abort_inflight(struct obd_import *imp, int dying_import)
{
        unsigned long flags;
        struct list_head *tmp, *n;

        /* Make sure that no new requests get processed for this import.
         * ptlrpc_queue_wait must (and does) hold imp_lock while testing this
         * flag and then putting requests on sending_list or delayed_list. */
        spin_lock_irqsave(&imp->imp_lock, flags);
        imp->imp_flags |= IMP_INVALID;
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "inflight");
                req->rq_flags |= PTL_RPC_FL_ERR;
                if (dying_import)
                        req->rq_import = NULL;
                wake_up(&req->rq_wait_for_rep);
        }

        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "aborting waiting req");
                req->rq_flags |= PTL_RPC_FL_ERR;
                if (dying_import)
                        req->rq_import = NULL;
                wake_up(&req->rq_wait_for_rep);
        }
}