Whamcloud - gitweb
WARNING: we currently crash on unmount after the last phase of runtests.
[fs/lustre-release.git] / lustre / llite / recover.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Light Super operations
5  *
6  * This code is issued under the GNU General Public License.
7  * See the file COPYING in this distribution
8  *
9  * Copyright (C) 1996 Peter J. Braam <braam@stelias.com>
10  * Copyright (C) 1999 Stelias Computing Inc. <braam@stelias.com>
11  * Copyright (C) 1999 Seagate Technology Inc.
12  * Copyright (C) 2001 Mountain View Data, Inc.
13  * Copyright (C) 2002 Cluster File Systems, Inc.
14  *
15  */
16
17 #include <linux/config.h>
18 #include <linux/module.h>
19 #include <linux/kmod.h>
20
21 #define DEBUG_SUBSYSTEM S_LLITE
22
23 #include <linux/lustre_lite.h>
24 #include <linux/lustre_ha.h>
25
26 static int ll_reconnect(struct ll_sb_info *sbi)
27 {
28         struct ll_fid rootfid;
29         __u64 last_committed;
30         __u64 last_xid;
31         int err;
32         struct ptlrpc_request *request; 
33         struct ptlrpc_connection *conn = sbi2mdc(sbi)->cl_import.imp_connection;
34
35         ptlrpc_readdress_connection(conn, "mds");
36
37         conn->c_level = LUSTRE_CONN_CON;
38
39         /* XXX: need to store the last_* values somewhere */
40         err = mdc_getstatus(&sbi->ll_mdc_conn, &rootfid, &last_committed,
41                             &last_xid, &request);
42         if (err) {
43                 CERROR("cannot mds_connect: rc = %d\n", err);
44                 GOTO(out_disc, err = -ENOTCONN);
45         }
46         conn->c_last_xid = last_xid;
47         conn->c_level = LUSTRE_CONN_RECOVD;
48
49  out_disc:
50         return err;
51 }
52
53 static int ll_recover_upcall(struct ptlrpc_connection *conn)
54 {
55         char *argv[3];
56         char *envp[3];
57
58         ENTRY;
59         conn->c_level = LUSTRE_CONN_RECOVD;
60
61         argv[0] = obd_recovery_upcall;
62         argv[1] = conn->c_remote_uuid;
63         argv[2] = NULL;
64
65         envp[0] = "HOME=/";
66         envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
67         envp[2] = NULL;
68
69         RETURN(call_usermodehelper(argv[0], argv, envp));
70 }
71
/* Replay/resend outstanding requests after a reconnect.
 *
 * Currently unimplemented: the function unconditionally returns -ENOSYS.
 * The #if 0 body below is a stale draft of the replay walk, kept for
 * reference; note it references `cli`, which is not declared here, and it
 * contains deliberate #error markers (inert inside #if 0) recording known
 * problems with holding c_lock across replay.  Do not enable as-is.
 */
static int ll_recover_reconnect(struct ptlrpc_connection *conn)
{
        RETURN(-ENOSYS);
#if 0
        /* XXXshaver this code needs to know about connection-driven recovery! */

        struct ptlrpc_request *req;
        struct list_head *tmp, *pos;
        struct ll_sb_info *sbi = cli->cli_data;
        struct ptlrpc_connection *conn = cli->cli_connection;
        int rc = 0;
        ENTRY;

        /* 1. reconnect */
        ll_reconnect(sbi);
        
        /* 2. walk the request list */
        spin_lock(&conn->c_lock);
        list_for_each_safe(tmp, pos, &conn->c_sending_head) { 
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                
                /* replay what needs to be replayed */
                if (req->rq_flags & PTL_RPC_FL_REPLAY) {
                        CDEBUG(D_INODE, "req %Ld needs replay [last rcvd %Ld]\n",
                               req->rq_xid, conn->c_last_xid);
#error We should not hold a spinlock over such a lengthy operation.
#error If necessary, drop spinlock, do operation, re-get spinlock, restart loop.
#error If we need to avoid re-processint items, then delete them from the list
#error as they are replayed and re-add at the tail of this list, so the next
#error item to process will always be at the head of the list.
                        rc = ptlrpc_replay_req(req);
                        if (rc) {
                                CERROR("recovery replay error %d for req %Ld\n",
                                       rc, req->rq_xid);
                                GOTO(out, rc);
                        }
                }

                /* server has seen req, we have reply: skip */
                if ((req->rq_flags & PTL_RPC_FL_REPLIED)  &&
                    req->rq_xid <= conn->c_last_xid) { 
                        CDEBUG(D_INODE,
                               "req %Ld was complete: skip [last rcvd %Ld]\n", 
                               req->rq_xid, conn->c_last_xid);
                        continue;
                }

                /* server has lost req, we have reply: resend, ign reply */
                if ((req->rq_flags & PTL_RPC_FL_REPLIED)  &&
                    req->rq_xid > conn->c_last_xid) { 
                        CDEBUG(D_INODE, "lost req %Ld have rep: replay [last "
                               "rcvd %Ld]\n", req->rq_xid, conn->c_last_xid);
                        rc = ptlrpc_replay_req(req); 
                        if (rc) {
                                CERROR("request resend error %d for req %Ld\n", 
                                       rc, req->rq_xid); 
                                GOTO(out, rc);
                        }
                }

                /* server has seen req, we have lost reply: -ERESTARTSYS */
                if ( !(req->rq_flags & PTL_RPC_FL_REPLIED)  &&
                     req->rq_xid <= conn->c_last_xid) { 
                        CDEBUG(D_INODE, "lost rep %Ld srv did req: restart "
                               "[last rcvd %Ld]\n", 
                               req->rq_xid, conn->c_last_xid);
                        ptlrpc_restart_req(req);
                }

                /* service has not seen req, no reply: resend */
                if ( !(req->rq_flags & PTL_RPC_FL_REPLIED)  &&
                     req->rq_xid > conn->c_last_xid) {
                        CDEBUG(D_INODE,
                               "lost rep/req %Ld: resend [last rcvd %Ld]\n", 
                               req->rq_xid, conn->c_last_xid);
                        ptlrpc_resend_req(req);
                }

        }

        sbi2mdc(sbi)->cl_conn->c_level = LUSTRE_CONN_FULL;
        recovd_conn_fixed(conn);

        /* Finally, continue what we delayed since recovery started */
        list_for_each_safe(tmp, pos, &conn->c_delayed_head) { 
                req = list_entry(tmp, struct ptlrpc_request, rq_list);
                ptlrpc_continue_req(req);
        }

        EXIT;
 out:
        spin_unlock(&conn->c_lock);
        return rc;
#endif
}
167
168 int ll_recover(struct recovd_data *rd, int phase)
169 {
170         struct ptlrpc_connection *conn = class_rd2conn(rd);
171
172         LASSERT(conn);
173         ENTRY;
174
175         switch (phase) {
176             case PTLRPC_RECOVD_PHASE_PREPARE:
177                 RETURN(ll_recover_upcall(conn));
178             case PTLRPC_RECOVD_PHASE_RECOVER:
179                 RETURN(ll_recover_reconnect(conn));
180             case PTLRPC_RECOVD_PHASE_FAILURE:
181                 fixme();
182                 RETURN(0);
183         }
184
185         LBUG();
186         RETURN(-ENOSYS);
187 }