/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal = rep_portal;
        cl->cli_name = name;
}

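/* Illustrative usage (not part of the original file): an OBD client fills in
 * its ptlrpc_client once at setup time, naming the request and reply portals
 * it will use.  The portal constants and client name below are examples only:
 *
 *      static struct ptlrpc_client my_client;
 *
 *      ptlrpc_init_client(MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
 *                         "mdc", &my_client);
 */
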
__u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return req->rq_connection->c_remote_uuid;
}

struct ptlrpc_connection *ptlrpc_uuid_to_connection(obd_uuid_t uuid)
{
        struct ptlrpc_connection *c;
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c)
                memcpy(c->c_remote_uuid, uuid, sizeof(c->c_remote_uuid));

        CDEBUG(D_INFO, "%s -> %p\n", uuid, c);
        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 obd_uuid_t uuid)
{
        struct lustre_peer peer;
        int err;

        err = kportal_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
}

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (desc != NULL) {
                desc->bd_connection = ptlrpc_connection_addref(conn);
                atomic_set(&desc->bd_refcount, 1);
                init_waitqueue_head(&desc->bd_waitq);
                INIT_LIST_HEAD(&desc->bd_page_list);
                ptl_set_inv_handle(&desc->bd_md_h);
                ptl_set_inv_handle(&desc->bd_me_h);
        }

        return desc;
}

struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                bulk->bp_desc = desc;
                list_add_tail(&bulk->bp_link, &desc->bd_page_list);
                desc->bd_page_count++;
        }
        return bulk;
}

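/* Illustrative only (not from the original source): a bulk transfer is
 * assembled as one descriptor plus one ptlrpc_bulk_page per page to be
 * moved, roughly along these lines, where "npages" and the per-page buffer
 * setup are hypothetical caller state:
 *
 *      struct ptlrpc_bulk_desc *desc = ptlrpc_prep_bulk(conn);
 *      for (i = 0; desc && i < npages; i++) {
 *              struct ptlrpc_bulk_page *bp = ptlrpc_prep_bulk_page(desc);
 *              if (bp == NULL)
 *                      break;
 *              // fill in bp's buffer and offset fields here
 *      }
 *      ...
 *      ptlrpc_free_bulk(desc);  // frees remaining pages and the descriptor
 */
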
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;

        if (desc == NULL)
                return;

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        ptlrpc_put_connection(desc->bd_connection);

        OBD_FREE(desc, sizeof(*desc));
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        if (bulk == NULL)
                return;

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
}

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_connection *conn = imp->imp_connection;
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = imp;

        /* XXX FIXME bug 625069 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(conn);

        INIT_LIST_HEAD(&request->rq_list);

        /* The new request starts with a refcount of 2.
         *
         * This will be reduced once when the sender is finished (waiting for
         * the reply, f.e.), and once when the request has been committed and
         * is removed from the to-be-committed list.
         *
         * Also, the refcount will be increased in ptl_send_rpc immediately
         * before we hand it off to portals, and there will be a corresponding
         * decrease in request_out_cb (which is called to indicate that portals
         * is finished with the request, and it can be safely freed).
         *
         * (Except in the DLM server case, where it will be dropped twice
         * by the sender, and then the last time by request_out_callback.)
         */
        atomic_set(&request->rq_refcount, 2);

        spin_lock(&conn->c_lock);
        request->rq_xid = HTON__u32(++conn->c_xid_out);
        spin_unlock(&conn->c_lock);

        request->rq_reqmsg->magic = PTLRPC_MSG_MAGIC;
        request->rq_reqmsg->version = PTLRPC_MSG_VERSION;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->flags = 0;

        ptlrpc_hdl2req(request, &imp->imp_handle);
        RETURN(request);
}

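/* Illustrative only (not part of the original file): the normal client-side
 * life cycle of a request pairs ptlrpc_prep_req() with ptlrpc_queue_wait()
 * and ptlrpc_req_finished().  The opcode and body type here are made-up
 * placeholders:
 *
 *      int size = sizeof(struct my_body);          // hypothetical
 *      struct ptlrpc_request *req;
 *
 *      req = ptlrpc_prep_req(imp, MY_OPCODE, 1, &size, NULL);
 *      if (!req)
 *              return -ENOMEM;
 *      rc = ptlrpc_queue_wait(req);
 *      rc = ptlrpc_check_status(req, rc);
 *      ptlrpc_req_finished(req);  // drops the sender's reference; the
 *                                 // second reference is dropped when the
 *                                 // server reports the transaction committed
 *                                 // (see ptlrpc_free_committed below).
 */
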
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (atomic_dec_and_test(&request->rq_refcount))
                ptlrpc_free_req(request);
        else
                DEBUG_REQ(D_INFO, request, "refcount now %u",
                          atomic_read(&request->rq_refcount));
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (atomic_read(&request->rq_refcount) != 0) {
                CERROR("freeing request %p (%d->%s:%d) with refcount %d\n",
                       request, request->rq_reqmsg->opc,
                       request->rq_connection->c_remote_uuid,
                       request->rq_import->imp_client->cli_request_portal,
                       atomic_read(&request->rq_refcount));
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                request->rq_reply_md.start = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }

        if (request->rq_connection) {
                spin_lock(&request->rq_connection->c_lock);
                list_del_init(&request->rq_list);
                spin_unlock(&request->rq_connection->c_lock);
        }

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
}

static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repmsg != NULL) {
                struct ptlrpc_connection *conn = req->rq_import->imp_connection;
                if (req->rq_level > conn->c_level) {
                        CDEBUG(D_HA,
                               "rep to xid "LPD64" op %d to %s:%d: "
                               "recovery started, ignoring (%d > %d)\n",
                               (unsigned long long)req->rq_xid,
                               req->rq_reqmsg->opc, conn->c_remote_uuid,
                               req->rq_import->imp_client->cli_request_portal,
                               req->rq_level, conn->c_level);
                        req->rq_repmsg = NULL;
                        GOTO(out, rc = 0);
                }
                req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
                req->rq_flags |= PTL_RPC_FL_REPLIED;
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_RESEND) {
                CERROR("-- RESTART --\n");
                GOTO(out, rc = 1);
        }

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                CERROR("-- ABORTED --\n");
                GOTO(out, rc = 1);
        }

 out:
        CDEBUG(D_NET, "req = %p, rc = %d\n", req, rc);
        return rc;
}

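/* Note (added commentary, not in the original): ptlrpc_check_reply() is the
 * wake-up predicate handed to l_wait_event() in ptlrpc_queue_wait() and
 * ptlrpc_replay_req() below; it returns nonzero when the waiter should wake,
 * i.e. when a usable reply has arrived, a resend has been requested, or the
 * request has been aborted.
 */
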
int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-EINVAL);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-EINVAL);
        }

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
                CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
                RETURN(err ? err : -EINVAL);
        }

        if (err != 0) {
                if (err < 0)
                        CERROR("req->rq_repmsg->status is %d\n", err);
                else
                        CDEBUG(D_INFO, "req->rq_repmsg->status is %d\n", err);
                /* XXX: translate this error from net to host */
                RETURN(err);
        }

        RETURN(0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and clean up any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repmsg = NULL;
        request->rq_replen = 0;
        return 0;
}

/* caller must hold conn->c_lock */
void ptlrpc_free_committed(struct ptlrpc_connection *conn)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        ENTRY;

 restart:
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                        DEBUG_REQ(D_HA, req, "keeping (in-flight)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > conn->c_last_committed)
                        break;

                DEBUG_REQ(D_HA, req, "committing (last_committed %Lu)",
                          (long long)conn->c_last_committed);
                if (atomic_dec_and_test(&req->rq_refcount)) {
                        /* We do this to prevent a free_req deadlock.
                         * Restarting after each removal is not so bad, as we
                         * are almost always deleting the first item in the
                         * list.
                         *
                         * If we use a recursive lock here, we can skip the
                         * unlock/lock/restart sequence.
                         */
                        spin_unlock(&conn->c_lock);
                        ptlrpc_free_req(req);
                        spin_lock(&conn->c_lock);
                        goto restart;
                } else {
                        list_del(&req->rq_list);
                        list_add(&req->rq_list, &conn->c_dying_head);
                }
        }

        EXIT;
}

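/* Added commentary (not in the original): this implements the client side of
 * the commit protocol.  Each reply carries the server's last_committed
 * transaction number (stored in conn->c_last_committed by ptlrpc_queue_wait);
 * any replied request whose rq_transno is at or below that value can no
 * longer be needed for replay, so its "committed" reference is dropped here.
 * Requests explicitly marked PTL_RPC_FL_REPLAY are kept regardless.
 */
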
void ptlrpc_cleanup_client(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_connection *conn = imp->imp_connection;
        ENTRY;

        spin_lock(&conn->c_lock);
        list_for_each_safe(tmp, saved, &conn->c_sending_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                /* XXX we should make sure that nobody's sleeping on these! */
                DEBUG_REQ(D_HA, req, "cleaning up from sending list");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                spin_lock(&conn->c_lock);
        }

        list_for_each_safe(tmp, saved, &conn->c_dying_head) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                if (req->rq_import != imp)
                        continue;
                DEBUG_REQ(D_ERROR, req, "on dying list at cleanup");
                list_del_init(&req->rq_list);
                req->rq_import = NULL;
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req);
                spin_lock(&conn->c_lock);
        }
        spin_unlock(&conn->c_lock);

        EXIT;
}

void ptlrpc_continue_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "continue delayed request "LPD64" opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "resend request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;
        req->rq_status = -EAGAIN;
        req->rq_level = LUSTRE_CONN_RECOVD;
        req->rq_flags |= PTL_RPC_FL_RESEND;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        ENTRY;
        CDEBUG(D_HA, "restart completed request "LPD64", opc %d\n",
               req->rq_xid, req->rq_reqmsg->opc);
        req->rq_status = -ERESTARTSYS;
        req->rq_flags |= PTL_RPC_FL_RECOVERY;
        req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
        wake_up(&req->rq_wait_for_rep);
        EXIT;
}

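/* Added commentary (not in the original): the three helpers above are the
 * recovery daemon's levers on a waiting request.  ptlrpc_continue_req() just
 * wakes the sleeper so a delayed request can proceed once the connection
 * level has risen again; ptlrpc_resend_req() marks the request
 * PTL_RPC_FL_RESEND so that ptlrpc_queue_wait() retransmits it; and
 * ptlrpc_restart_req() fails it back to the caller with -ERESTARTSYS so the
 * whole operation is restarted from the top.
 */
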
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        DEBUG_REQ(D_ERROR, req, "timeout");
        req->rq_flags |= PTL_RPC_FL_TIMEOUT;

        if (!req->rq_import) {
                DEBUG_REQ(D_ERROR, req, "NULL import");
                RETURN(0);
        }

        if (!req->rq_import->imp_connection) {
                DEBUG_REQ(D_ERROR, req, "NULL connection");
                RETURN(0);
        }

        if (!req->rq_import->imp_connection->c_recovd_data.rd_recovd)
                RETURN(1);

        req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
        recovd_conn_fail(req->rq_import->imp_connection);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep.
         */
        if (req->rq_level < LUSTRE_CONN_FULL)
                RETURN(1);

        RETURN(0);
}

static int interrupted_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;
        req->rq_flags |= PTL_RPC_FL_INTR;
        RETURN(1); /* ignored, as of this writing */
}

/* If we're being torn down by umount -f, or the import has been
 * invalidated (such as by an OST failure), the request must fail with
 * -EIO.
 *
 * Must be called with conn->c_lock held; the lock is dropped if this
 * returns -EIO.
 *
 * XXX this should just be testing the import, and umount_begin shouldn't
 * XXX touch the connection.
 */
#define EIO_IF_INVALID(conn, req)                                             \
if ((conn->c_flags & CONN_INVALID) ||                                         \
    (req->rq_import->imp_flags & IMP_INVALID)) {                              \
        DEBUG_REQ(D_ERROR, req, "%s_INVALID:",                                \
                  (conn->c_flags & CONN_INVALID) ? "CONN" : "IMP");           \
        spin_unlock(&conn->c_lock);                                           \
        RETURN(-EIO);                                                         \
}

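/* Added note (not in the original): because the macro body contains a bare
 * RETURN(-EIO), it can only be used inside an int-returning function that
 * currently holds conn->c_lock; on the invalid path it unlocks and returns,
 * so callers such as ptlrpc_queue_wait() below must not assume the lock is
 * still held after the macro "fails".  At a use site it behaves roughly like:
 *
 *      spin_lock(&conn->c_lock);
 *      EIO_IF_INVALID(conn, req);      // may unlock and RETURN(-EIO)
 *      ...                             // still locked on the normal path
 */
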
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        struct l_wait_info lwi;
        //struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct ptlrpc_connection *conn = req->rq_import->imp_connection;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        //DEBUG_REQ(D_HA, req, "subsys: %s:", cli->cli_name);

        /* XXX probably both an import and connection level are needed */
        if (req->rq_level > conn->c_level) {
                spin_lock(&conn->c_lock);
                EIO_IF_INVALID(conn, req);
                list_del(&req->rq_list);
                list_add_tail(&req->rq_list, &conn->c_delayed_head);
                spin_unlock(&conn->c_lock);

                DEBUG_REQ(D_HA, req, "waiting for recovery: (%d < %d)",
                          req->rq_level, conn->c_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= conn->c_level) ||
                                  (req->rq_flags & PTL_RPC_FL_ERR), &lwi);

                spin_lock(&conn->c_lock);
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);

                if (req->rq_flags & PTL_RPC_FL_ERR)
                        RETURN(-EIO);

                if (rc)
                        RETURN(rc);

                CERROR("process %d resumed\n", current->pid);
        }

 resend:
        req->rq_timeout = obd_timeout;
        spin_lock(&conn->c_lock);
        EIO_IF_INVALID(conn, req);

        list_del(&req->rq_list);
        list_add_tail(&req->rq_list, &conn->c_sending_head);
        spin_unlock(&conn->c_lock);
        rc = ptl_send_rpc(req);
        if (rc) {
                CDEBUG(D_HA, "error %d, opcode %d, need recovery\n", rc,
                       req->rq_reqmsg->opc);
                /* the sleep below will time out, triggering recovery */
        }

        DEBUG_REQ(D_NET, req, "-- sleeping");
        lwi = LWI_TIMEOUT_INTR(req->rq_timeout * HZ, expired_request,
                               interrupted_request, req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        DEBUG_REQ(D_NET, req, "-- done sleeping");

        if (req->rq_flags & PTL_RPC_FL_ERR) {
                GOTO(out, rc = -EIO);
        }

        /* Don't resend if we were interrupted. */
        if ((req->rq_flags & (PTL_RPC_FL_RESEND | PTL_RPC_FL_INTR)) ==
            PTL_RPC_FL_RESEND) {
                req->rq_flags &= ~PTL_RPC_FL_RESEND;
                DEBUG_REQ(D_HA, req, "resending: ");
                goto resend;
        }

        // up(&cli->cli_rpc_sem);
        if (req->rq_flags & PTL_RPC_FL_INTR) {
                if (!(req->rq_flags & PTL_RPC_FL_TIMEOUT))
                        LBUG(); /* should only be interrupted if we timed out */
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
                GOTO(out, rc = -ETIMEDOUT);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
                GOTO(out, rc = req->rq_status);

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                GOTO(out, rc = -EINVAL);
        }
#endif
        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);
        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
                       req->rq_replen, req->rq_repmsg->status);

        spin_lock(&conn->c_lock);

        /* Requests that aren't from replayable imports, or which don't have
         * transno information, can be "committed" early.
         */
        if ((req->rq_import->imp_flags & IMP_REPLAYABLE) == 0 ||
            req->rq_repmsg->transno == 0) {
                /* This import doesn't support replay, so we can just "commit"
                 * this request now. */
                DEBUG_REQ(D_HA, req, "not replayable, committing:");
                list_del_init(&req->rq_list);
                spin_unlock(&conn->c_lock);
                ptlrpc_req_finished(req); /* Must be called unlocked. */
                spin_lock(&conn->c_lock);
        }

        /* Replay-enabled imports return commit-status information. */
        if (req->rq_import->imp_flags & IMP_REPLAYABLE) {
                /* XXX this needs to be per-import, or multiple MDS services on
                 * XXX the same system are going to interfere messily with each
                 * XXX others' transno spaces.
                 */
                conn->c_last_xid = req->rq_repmsg->last_xid;
                conn->c_last_committed = req->rq_repmsg->last_committed;
                ptlrpc_free_committed(conn);
        }

        spin_unlock(&conn->c_lock);

        EXIT;
 out:
        return rc;
}

#undef EIO_IF_INVALID

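/* Added summary (not in the original): ptlrpc_queue_wait() is the synchronous
 * send path.  After the l_wait_event() sleep, the flag bits decide the
 * outcome: PTL_RPC_FL_ERR fails the call with -EIO, PTL_RPC_FL_RESEND
 * (without INTR) loops back to "resend:", PTL_RPC_FL_INTR returns -EINTR,
 * PTL_RPC_FL_TIMEOUT returns -ETIMEDOUT, and only PTL_RPC_FL_REPLIED proceeds
 * to unpack the reply and update the connection's last_committed bookkeeping.
 */
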
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);
        DEBUG_REQ(D_NET, req, "");

        req->rq_timeout = obd_timeout;
        req->rq_reqmsg->addr = req->rq_import->imp_handle.addr;
        req->rq_reqmsg->cookie = req->rq_import->imp_handle.cookie;

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_flags & PTL_RPC_FL_REPLIED)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if ((req->rq_flags & PTL_RPC_FL_REPLIED) &&
            req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}
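
/* Added note (not in the original): ptlrpc_replay_req() is the recovery-time
 * counterpart of ptlrpc_queue_wait().  It resends a request that was kept on
 * the sending/replay lists, temporarily lowering it to LUSTRE_CONN_RECOVD so
 * it is allowed through while the connection is still recovering, then lets
 * rq_replay_cb patch up request/reply state before comparing the new reply
 * status against the one recorded before the failure.
 */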