1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_RPC
27 #include <linux/lustre_ha.h>
/*
 * ptlrpc_init_client(): zero *cl and set it up as an RPC client --
 * remember the recovery daemon and its recovery callback, register the
 * client with recovd, record the request/reply portals, and initialize
 * the delayed/sending/dying request lists, the client spinlock, and the
 * RPCs-in-flight semaphore (initial count 32).
 * NOTE(review): numbering gaps show braces and at least one parameter
 * line (the one declaring 'req_portal', used below) are missing from
 * this dump; code tokens are reproduced verbatim.
 */
29 void ptlrpc_init_client(struct recovd_obd *recovd,
30 int (*recover)(struct ptlrpc_client *recover),
32 int rep_portal, struct ptlrpc_client *cl)
34 memset(cl, 0, sizeof(*cl));
35 cl->cli_recovd = recovd;
36 cl->cli_recover = recover;
/* attach this client to the recovery handler */
38 recovd_cli_manage(recovd, cl);
40 cl->cli_request_portal = req_portal;
41 cl->cli_reply_portal = rep_portal;
42 INIT_LIST_HEAD(&cl->cli_delayed_head);
43 INIT_LIST_HEAD(&cl->cli_sending_head);
44 INIT_LIST_HEAD(&cl->cli_dying_head);
45 spin_lock_init(&cl->cli_lock);
/* caps concurrent RPCs for this client at 32 */
46 sema_init(&cl->cli_rpc_sem, 32);
/*
 * ptlrpc_req_to_uuid(): return the remote peer UUID recorded on the
 * connection this request is bound to.
 */
49 __u8 *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
51 return req->rq_connection->c_remote_uuid;
/*
 * ptlrpc_uuid_to_connection(): translate a peer UUID string into a
 * ptlrpc connection -- look the UUID up in the portals peer table,
 * then obtain a connection object for that peer.
 * NOTE(review): the declaration of 'err', the failure branch guarding
 * the CERROR below, and the final return of 'c' are absent from this
 * dump (line-number gaps) -- verify against the complete file.
 */
54 struct ptlrpc_connection *ptlrpc_uuid_to_connection(char *uuid)
56 struct ptlrpc_connection *c;
57 struct lustre_peer peer;
60 err = kportal_uuid_to_peer(uuid, &peer);
/* presumably reached only when the lookup failed -- guard missing in dump */
62 CERROR("cannot find peer %s!\n", uuid);
66 c = ptlrpc_get_connection(&peer);
/*
 * ptlrpc_readdress_connection(): re-resolve 'uuid' and overwrite the
 * connection's cached peer with the fresh lookup result -- presumably
 * so an existing connection can follow a peer whose address changed
 * (TODO confirm against callers).
 * NOTE(review): the 'err' declaration and the failure branch guarding
 * the CERROR below are absent from this dump.
 */
73 void ptlrpc_readdress_connection(struct ptlrpc_connection *conn, char *uuid)
75 struct lustre_peer peer;
78 err = kportal_uuid_to_peer(uuid, &peer);
80 CERROR("cannot find peer %s!\n", uuid);
/* overwrite the cached peer with the freshly resolved one */
84 memcpy(&conn->c_peer, &peer, sizeof(peer));
/*
 * ptlrpc_prep_bulk(): allocate a bulk I/O descriptor bound to 'conn'.
 * Takes a reference on the connection and initializes the descriptor's
 * wait queue and page list.  The caller owns the result and releases
 * it with ptlrpc_free_bulk().
 * NOTE(review): the NULL check after OBD_ALLOC and the return
 * statement are absent from this dump.
 */
88 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct ptlrpc_connection *conn)
90 struct ptlrpc_bulk_desc *bulk;
92 OBD_ALLOC(bulk, sizeof(*bulk));
/* hold the connection for the descriptor's lifetime */
94 bulk->b_connection = ptlrpc_connection_addref(conn);
95 init_waitqueue_head(&bulk->b_waitq);
96 INIT_LIST_HEAD(&bulk->b_page_list);
/*
 * ptlrpc_prep_bulk_page(): allocate one bulk page, invalidate its
 * Portals MD/ME handles, link it onto the descriptor's page list and
 * bump the page count.  Freed via ptlrpc_free_bulk_page().
 * NOTE(review): the NULL check after OBD_ALLOC and the return are
 * absent from this dump; the assignment of page->b_desc (read by
 * ptlrpc_free_bulk_page) is also not visible here -- confirm.
 */
102 struct ptlrpc_bulk_page *ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc)
104 struct ptlrpc_bulk_page *page;
106 OBD_ALLOC(page, sizeof(*page));
/* start with invalid Portals handles until the page is posted */
109 ptl_set_inv_handle(&page->b_md_h);
110 ptl_set_inv_handle(&page->b_me_h);
111 list_add(&page->b_link, &desc->b_page_list);
112 desc->b_page_count++;
/*
 * ptlrpc_free_bulk(): tear down a bulk descriptor -- free every page
 * still on its page list, drop the connection reference taken in
 * ptlrpc_prep_bulk(), and free the descriptor itself.
 */
117 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk)
119 struct list_head *tmp, *next;
/* _safe iteration: ptlrpc_free_bulk_page() unlinks the entry */
126 list_for_each_safe(tmp, next, &bulk->b_page_list) {
127 struct ptlrpc_bulk_page *page;
128 page = list_entry(tmp, struct ptlrpc_bulk_page, b_link);
129 ptlrpc_free_bulk_page(page);
132 ptlrpc_put_connection(bulk->b_connection);
134 OBD_FREE(bulk, sizeof(*bulk));
/*
 * ptlrpc_free_bulk_page(): unlink a page from its owning descriptor,
 * decrement the descriptor's page count, and free the page struct.
 * NOTE(review): relies on page->b_desc being valid; its assignment is
 * not visible in this dump -- confirm it is set at prep time.
 */
138 void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *page)
146 list_del(&page->b_link);
147 page->b_desc->b_page_count--;
148 OBD_FREE(page, sizeof(*page));
/*
 * ptlrpc_prep_req(): allocate and initialize an outgoing RPC request
 * for client 'cl' over connection 'conn'.  Packs the request message
 * ('count' buffers with the given 'lengths'), stamps the remote
 * connection cookie/token, opcode and message type (converted to
 * network byte order), and assigns the next per-connection transfer
 * id (xid) under conn->c_lock.
 * NOTE(review): the trailing parameter line (declaring 'bufs'), the
 * 'rc' declaration, the allocation/pack failure branches and the
 * final return of 'request' are absent from this dump.
 */
152 struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl,
153 struct ptlrpc_connection *conn,
154 int opcode, int count, int *lengths,
157 struct ptlrpc_request *request;
161 OBD_ALLOC(request, sizeof(*request));
163 CERROR("request allocation out of memory\n");
167 rc = lustre_pack_msg(count, lengths, bufs,
168 &request->rq_reqlen, &request->rq_reqmsg);
170 CERROR("cannot pack request %d\n", rc);
174 request->rq_type = PTL_RPC_TYPE_REQUEST;
/* hold the connection for the lifetime of the request */
175 request->rq_connection = ptlrpc_connection_addref(conn);
177 request->rq_reqmsg->conn = (__u64)(unsigned long)conn->c_remote_conn;
178 request->rq_reqmsg->token = conn->c_remote_token;
179 request->rq_reqmsg->opc = HTON__u32(opcode);
180 request->rq_reqmsg->type = HTON__u32(PTL_RPC_MSG_REQUEST);
181 INIT_LIST_HEAD(&request->rq_list);
183 /* this will be dec()d once in req_finished, once in free_committed */
184 atomic_set(&request->rq_refcount, 2);
/* xid allocation must be serialized per connection */
186 spin_lock(&conn->c_lock);
187 request->rq_reqmsg->xid = HTON__u32(++conn->c_xid_out);
188 request->rq_xid = conn->c_xid_out;
189 spin_unlock(&conn->c_lock);
191 request->rq_client = cl;
/*
 * ptlrpc_req_finished(): release the caller's reference on a request.
 * The reply buffer is freed eagerly here; the request itself is freed
 * only when the last reference goes away (the request starts with a
 * refcount of 2 -- see ptlrpc_prep_req()).
 */
196 void ptlrpc_req_finished(struct ptlrpc_request *request)
201 if (request->rq_repmsg != NULL) {
202 OBD_FREE(request->rq_repmsg, request->rq_replen);
203 request->rq_repmsg = NULL;
/* clear the MD start so nothing re-frees the reply buffer */
204 request->rq_reply_md.start = NULL;
207 if (atomic_dec_and_test(&request->rq_refcount))
208 ptlrpc_free_req(request);
/*
 * ptlrpc_free_req(): final teardown of a request -- free any reply and
 * request buffers, unlink it from the owning client's list under
 * cli_lock (if rq_client is still set), drop the connection reference,
 * and free the request struct itself.
 */
211 void ptlrpc_free_req(struct ptlrpc_request *request)
216 if (request->rq_repmsg != NULL)
217 OBD_FREE(request->rq_repmsg, request->rq_replen);
218 if (request->rq_reqmsg != NULL)
219 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
/* rq_client may have been cleared by the caller to avoid deadlock on
 * cli_lock (see ptlrpc_free_committed / ptlrpc_cleanup_client) */
221 if (request->rq_client) {
222 spin_lock(&request->rq_client->cli_lock);
223 list_del_init(&request->rq_list);
224 spin_unlock(&request->rq_client->cli_lock);
227 ptlrpc_put_connection(request->rq_connection);
229 OBD_FREE(request, sizeof(*request));
/*
 * ptlrpc_check_reply(): wake-up condition evaluated by the waiters in
 * ptlrpc_queue_wait()/ptlrpc_replay_req().  Records a received reply
 * (host-order transno, PTL_RPC_FL_REPLIED), and detects resend and
 * restart requests, request timeouts (which escalate the connection
 * level to LUSTRE_CONN_RECOVD and notify recovd if attached), and
 * pending fatal signals (PTL_RPC_FL_INTR).
 * NOTE(review): the 'rc' declaration, the returns paired with each
 * branch, and several braces are absent from this dump, so the exact
 * return paths cannot be confirmed here.
 */
232 static int ptlrpc_check_reply(struct ptlrpc_request *req)
236 if (req->rq_repmsg != NULL) {
237 req->rq_transno = NTOH__u64(req->rq_repmsg->transno);
238 req->rq_flags |= PTL_RPC_FL_REPLIED;
242 if (req->rq_flags & PTL_RPC_FL_RESEND) {
243 CERROR("-- RESEND --\n");
247 if (req->rq_flags & PTL_RPC_FL_RECOVERY) {
248 CERROR("-- RESTART --\n");
/* request has been outstanding longer than rq_timeout seconds */
253 if (CURRENT_TIME - req->rq_time >= req->rq_timeout) {
254 CERROR("-- REQ TIMEOUT --\n");
255 /* clear the timeout */
257 req->rq_connection->c_level = LUSTRE_CONN_RECOVD;
258 req->rq_flags |= PTL_RPC_FL_TIMEOUT;
259 if (req->rq_client && req->rq_client->cli_recovd)
260 recovd_cli_fail(req->rq_client);
261 if (req->rq_level < LUSTRE_CONN_FULL)
268 if (req->rq_timeout) {
269 schedule_timeout(req->rq_timeout * HZ);
/* flag (but do not yet act on) pending fatal signals */
272 if (sigismember(&(current->pending.signal), SIGKILL) ||
273 sigismember(&(current->pending.signal), SIGTERM) ||
274 sigismember(&(current->pending.signal), SIGINT)) {
275 req->rq_flags |= PTL_RPC_FL_INTR;
/*
 * ptlrpc_check_status(): sanity-check a completed request and fold
 * 'err' / the reply status into a return code.  Complains via CERROR
 * about a bad 'err', a NULL request or reply message, an explicit
 * PTL_RPC_MSG_ERR reply type, or a non-zero per-message status; the
 * status is returned as-is (byte-order translation is still a TODO,
 * per the XXX below).
 * NOTE(review): the early RETURNs paired with each CERROR branch are
 * absent from this dump.
 */
283 int ptlrpc_check_status(struct ptlrpc_request *req, int err)
288 CERROR("err is %d\n", err);
293 CERROR("req == NULL\n");
297 if (req->rq_repmsg == NULL) {
298 CERROR("req->rq_repmsg == NULL\n");
302 if (req->rq_repmsg->type == NTOH__u32(PTL_RPC_MSG_ERR)) {
303 CERROR("req->rq_repmsg->type == PTL_RPC_MSG_ERR\n");
307 if (req->rq_repmsg->status != 0) {
308 CERROR("req->rq_repmsg->status is %d\n",
309 req->rq_repmsg->status);
310 /* XXX: translate this error from net to host */
311 RETURN(req->rq_repmsg->status);
/*
 * ptlrpc_cleanup_request_buf(): free the packed request buffer and
 * reset the request's message pointer and length.
 */
317 static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
319 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
320 request->rq_reqmsg = NULL;
321 request->rq_reqlen = 0;
324 /* Abort this request and cleanup any resources associated with it. */
/*
 * Unlinks the reply ME, frees the reply buffer via the MD start
 * pointer, and clears the reply bookkeeping fields.
 * NOTE(review): the return value path is absent from this dump.
 */
325 static int ptlrpc_abort(struct ptlrpc_request *request)
327 /* First remove the ME for the reply; in theory, this means
328 * that we can tear down the buffer safely. */
329 PtlMEUnlink(request->rq_reply_me_h);
330 OBD_FREE(request->rq_reply_md.start, request->rq_replen);
331 request->rq_repmsg = NULL;
332 request->rq_replen = 0;
336 /* caller must lock cli */
/*
 * ptlrpc_free_committed(): walk the client's sending list and retire
 * requests the server has committed (rq_transno != 0 and <=
 * cli_last_committed).  Requests flagged PTL_RPC_FL_REPLAY are kept
 * for replay; uncommitted ones are skipped.  On the final reference
 * the request is freed immediately (rq_client is cleared first so
 * ptlrpc_free_req() does not re-take cli_lock); otherwise it is moved
 * to the dying list.
 * NOTE(review): the 'continue' statements and if/else braces implied
 * by the numbering gaps are absent from this dump.
 */
337 void ptlrpc_free_committed(struct ptlrpc_client *cli)
339 struct list_head *tmp, *saved;
340 struct ptlrpc_request *req;
342 list_for_each_safe(tmp, saved, &cli->cli_sending_head) {
343 req = list_entry(tmp, struct ptlrpc_request, rq_list);
/* replay-flagged requests must survive until replay is done */
345 if ( (req->rq_flags & PTL_RPC_FL_REPLAY) ) {
346 CDEBUG(D_INFO, "Retaining request %Ld for replay\n",
351 /* not yet committed */
352 if (!req->rq_transno ||
353 req->rq_transno > cli->cli_last_committed)
356 CDEBUG(D_INFO, "Marking request %Ld as committed ("
357 "transno=%Lu, last_committed=%Lu\n",
358 req->rq_xid, req->rq_transno,
359 cli->cli_last_committed);
360 if (atomic_dec_and_test(&req->rq_refcount)) {
361 /* we do this to prevent free_req deadlock */
362 list_del_init(&req->rq_list);
363 req->rq_client = NULL;
364 ptlrpc_free_req(req);
/* still referenced: park it on the dying list instead */
366 list_del_init(&req->rq_list);
367 list_add(&req->rq_list, &cli->cli_dying_head);
/*
 * ptlrpc_cleanup_client(): forcibly free every request still on the
 * client's sending and dying lists, all under cli_lock.  rq_client is
 * cleared before each ptlrpc_free_req() so the free path does not try
 * to take cli_lock again.  Requests found on the dying list at
 * cleanup time are unexpected and reported via CERROR.
 */
375 void ptlrpc_cleanup_client(struct ptlrpc_client *cli)
377 struct list_head *tmp, *saved;
378 struct ptlrpc_request *req;
381 spin_lock(&cli->cli_lock);
382 list_for_each_safe(tmp, saved, &cli->cli_sending_head) {
383 req = list_entry(tmp, struct ptlrpc_request, rq_list);
384 CDEBUG(D_INFO, "Cleaning req %p from sending list.\n", req);
385 list_del_init(&req->rq_list);
386 req->rq_client = NULL;
387 ptlrpc_free_req(req);
389 list_for_each_safe(tmp, saved, &cli->cli_dying_head) {
390 req = list_entry(tmp, struct ptlrpc_request, rq_list);
391 CERROR("Request %p is on the dying list at cleanup!\n", req);
392 list_del_init(&req->rq_list);
393 req->rq_client = NULL;
394 ptlrpc_free_req(req);
396 spin_unlock(&cli->cli_lock);
/*
 * ptlrpc_continue_req(): wake a delayed request's waiter so it
 * re-evaluates its wait condition (ptlrpc_check_reply via
 * ptlrpc_queue_wait's recovery gate).
 */
402 void ptlrpc_continue_req(struct ptlrpc_request *req)
405 CDEBUG(D_INODE, "continue delayed request %Ld opc %d\n",
406 req->rq_xid, req->rq_reqmsg->opc);
407 wake_up_interruptible(&req->rq_wait_for_rep);
/*
 * ptlrpc_resend_req(): mark a request for resend -- set -EAGAIN,
 * demand recovery level, raise PTL_RPC_FL_RESEND and clear any
 * timeout flag, then wake the waiter so ptlrpc_queue_wait() loops.
 */
411 void ptlrpc_resend_req(struct ptlrpc_request *req)
414 CDEBUG(D_INODE, "resend request %Ld, opc %d\n",
415 req->rq_xid, req->rq_reqmsg->opc);
416 req->rq_status = -EAGAIN;
417 req->rq_level = LUSTRE_CONN_RECOVD;
418 req->rq_flags |= PTL_RPC_FL_RESEND;
419 req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
420 wake_up_interruptible(&req->rq_wait_for_rep);
/*
 * ptlrpc_restart_req(): mark a completed request for restart -- set
 * -ERESTARTSYS, raise PTL_RPC_FL_RECOVERY and clear any timeout flag,
 * then wake the waiter.
 */
424 void ptlrpc_restart_req(struct ptlrpc_request *req)
427 CDEBUG(D_INODE, "restart completed request %Ld, opc %d\n",
428 req->rq_xid, req->rq_reqmsg->opc);
429 req->rq_status = -ERESTARTSYS;
430 req->rq_flags |= PTL_RPC_FL_RECOVERY;
431 req->rq_flags &= ~PTL_RPC_FL_TIMEOUT;
432 wake_up_interruptible(&req->rq_wait_for_rep);
/*
 * ptlrpc_queue_wait(): send 'req' and block until it is answered or
 * aborted.  If the request's required level exceeds the connection
 * level, the request is first parked on the client's delayed list
 * until recovery raises the connection level.  It is then
 * timestamped (30s timeout), sent via ptl_send_rpc(), moved to the
 * tail of the sending list, and the caller sleeps until
 * ptlrpc_check_reply() says otherwise.  On wakeup: RESEND clears the
 * flag (the resend jump target is not visible in this dump), TIMEOUT
 * maps to -ETIMEDOUT, INTR to -EINTR, a missing REPLIED flag returns
 * rq_status; otherwise the reply is unpacked and the client's
 * last_rcvd/last_committed are refreshed, retiring committed requests.
 * NOTE(review): numbering gaps hide ENTRY/RETURN/GOTO targets, braces,
 * and the down() on cli_rpc_sem balanced by the up() calls below --
 * confirm against the full source.
 */
436 int ptlrpc_queue_wait(struct ptlrpc_request *req)
439 struct ptlrpc_client *cli = req->rq_client;
442 init_waitqueue_head(&req->rq_wait_for_rep);
443 CDEBUG(D_NET, "subsys: %s req %Ld opc %d level %d, conn level %d\n",
444 cli->cli_name, req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
445 req->rq_connection->c_level);
447 /* XXX probably both an import and connection level are needed */
448 if (req->rq_level > req->rq_connection->c_level) {
449 CERROR("process %d waiting for recovery\n", current->pid);
/* park on the delayed list (tail) until recovery catches up */
450 spin_lock(&cli->cli_lock);
451 list_del_init(&req->rq_list);
452 list_add(&req->rq_list, cli->cli_delayed_head.prev);
453 spin_unlock(&cli->cli_lock);
454 wait_event_interruptible
455 (req->rq_wait_for_rep,
456 req->rq_level <= req->rq_connection->c_level);
457 spin_lock(&cli->cli_lock);
458 list_del_init(&req->rq_list);
459 spin_unlock(&cli->cli_lock);
460 CERROR("process %d resumed\n", current->pid);
/* timestamp for ptlrpc_check_reply()'s timeout test; 30s budget */
463 req->rq_time = CURRENT_TIME;
464 req->rq_timeout = 30;
465 rc = ptl_send_rpc(req);
467 CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
/* send failed: release the request buffer and the RPC slot */
470 ptlrpc_cleanup_request_buf(req);
471 up(&cli->cli_rpc_sem);
/* in flight: track it on the sending list (tail) */
475 spin_lock(&cli->cli_lock);
476 list_del_init(&req->rq_list);
477 list_add(&req->rq_list, cli->cli_sending_head.prev);
478 spin_unlock(&cli->cli_lock);
480 CDEBUG(D_OTHER, "-- sleeping\n");
481 wait_event_interruptible(req->rq_wait_for_rep,
482 ptlrpc_check_reply(req));
483 CDEBUG(D_OTHER, "-- done\n");
485 if (req->rq_flags & PTL_RPC_FL_RESEND) {
486 req->rq_flags &= ~PTL_RPC_FL_RESEND;
490 up(&cli->cli_rpc_sem);
491 if (req->rq_flags & PTL_RPC_FL_TIMEOUT)
492 GOTO(out, rc = -ETIMEDOUT);
494 if (req->rq_flags & PTL_RPC_FL_INTR) {
495 /* Clean up the dangling reply buffers */
497 GOTO(out, rc = -EINTR);
500 if (!(req->rq_flags & PTL_RPC_FL_REPLIED))
501 GOTO(out, rc = req->rq_status);
503 rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
505 CERROR("unpack_rep failed: %d\n", rc);
508 CDEBUG(D_NET, "got rep %d\n", req->rq_repmsg->xid);
509 if (req->rq_repmsg->status == 0)
510 CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
511 req->rq_replen, req->rq_repmsg->status);
/* refresh commit state from the reply and retire committed reqs */
513 spin_lock(&cli->cli_lock);
514 cli->cli_last_rcvd = req->rq_repmsg->last_rcvd;
515 cli->cli_last_committed = req->rq_repmsg->last_committed;
516 ptlrpc_free_committed(cli);
517 spin_unlock(&cli->cli_lock);
/*
 * ptlrpc_replay_req(): re-send an already-built request during
 * recovery and wait for the reply -- mirrors the send/wait tail of
 * ptlrpc_queue_wait() but without the delayed-list recovery gate.
 * Logs via CERROR since this path only runs during recovery.
 * NOTE(review): this dump ends mid-function (the tail after the final
 * CERROR is not visible) and intermediate braces/returns, the 'rc'
 * declaration, and the cli_rpc_sem down() balancing the up() calls
 * below are missing -- confirm against the full source.
 */
524 int ptlrpc_replay_req(struct ptlrpc_request *req)
527 struct ptlrpc_client *cli = req->rq_client;
530 init_waitqueue_head(&req->rq_wait_for_rep);
531 CERROR("req %Ld opc %d level %d, conn level %d\n",
532 req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
533 req->rq_connection->c_level);
/* restart the timeout clock for the replayed send */
535 req->rq_time = CURRENT_TIME;
537 rc = ptl_send_rpc(req);
539 CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
540 ptlrpc_cleanup_request_buf(req);
541 up(&cli->cli_rpc_sem);
545 CDEBUG(D_OTHER, "-- sleeping\n");
546 wait_event_interruptible(req->rq_wait_for_rep,
547 ptlrpc_check_reply(req));
548 CDEBUG(D_OTHER, "-- done\n");
550 up(&cli->cli_rpc_sem);
552 if (!(req->rq_flags & PTL_RPC_FL_REPLIED)) {
553 CERROR("Unknown reason for wakeup\n");
554 /* XXX Phil - I end up here when I kill obdctl */
556 GOTO(out, rc = -EINTR);
559 rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
561 CERROR("unpack_rep failed: %d\n", rc);
565 CDEBUG(D_NET, "got rep %d\n", req->rq_repmsg->xid);
566 if (req->rq_repmsg->status == 0)
567 CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repmsg,
568 req->rq_replen, req->rq_repmsg->status);
/* reply carried a failure status: report the recovery failure */
570 CERROR("recovery failed: ");
571 CERROR("req %Ld opc %d level %d, conn level %d\n",
572 req->rq_xid, req->rq_reqmsg->opc, req->rq_level,
573 req->rq_connection->c_level);