lustre/ldlm/ldlm_lib.c (fs/lustre-release.git)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21
22 #ifndef EXPORT_SYMTAB
23 # define EXPORT_SYMTAB
24 #endif
25 #define DEBUG_SUBSYSTEM S_LDLM
26
27 #ifdef __KERNEL__
28 # include <linux/module.h>
29 #else
30 # include <liblustre.h>
31 #endif
32 #include <linux/obd.h>
33 #include <linux/obd_ost.h> /* for LUSTRE_OSC_NAME */
34 #include <linux/lustre_mds.h> /* for LUSTRE_MDC_NAME */
35 #include <linux/lustre_mgmt.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/lustre_net.h>
38
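/* Common setup for the client-side OBD types (OSC, MDC, management client).
 * The lustre_cfg buffers are used as follows: inlbuf1 carries the target
 * UUID (copied into imp_target_uuid), inlbuf2 the server UUID used to look
 * up the ptlrpc connection, inlbuf3 either the string "inactive" or a
 * mgmtcli name, and inlbuf4 the mgmtcli name when inlbuf3 is "inactive".
 * Request and reply portals are chosen from the OBD type name. */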
39 int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf)
40 {
41         struct ptlrpc_connection *conn;
42         struct lustre_cfg* lcfg = buf;
43         struct client_obd *cli = &obddev->u.cli;
44         struct obd_import *imp;
45         struct obd_uuid server_uuid;
46         int rq_portal, rp_portal, connect_op;
47         char *name = obddev->obd_type->typ_name;
48         char *mgmt_name = NULL;
49         int rc = 0;
50         struct obd_device *mgmt_obd;
51         mgmtcli_register_for_events_t register_f;
52         ENTRY;
53
54         /* In a more perfect world, we would hang a ptlrpc_client off of
55          * obd_type and just use the values from there. */
56         if (!strcmp(name, LUSTRE_OSC_NAME)) {
57                 rq_portal = OST_REQUEST_PORTAL;
58                 rp_portal = OSC_REPLY_PORTAL;
59                 connect_op = OST_CONNECT;
60         } else if (!strcmp(name, LUSTRE_MDC_NAME)) {
61                 rq_portal = MDS_REQUEST_PORTAL;
62                 rp_portal = MDC_REPLY_PORTAL;
63                 connect_op = MDS_CONNECT;
64         } else if (!strcmp(name, LUSTRE_MGMTCLI_NAME)) {
65                 rq_portal = MGMT_REQUEST_PORTAL;
66                 rp_portal = MGMT_REPLY_PORTAL;
67                 connect_op = MGMT_CONNECT;
68         } else {
69                 CERROR("unknown client OBD type \"%s\", can't setup\n",
70                        name);
71                 RETURN(-EINVAL);
72         }
73
74         if (lcfg->lcfg_inllen1 < 1) {
75                 CERROR("requires a TARGET UUID\n");
76                 RETURN(-EINVAL);
77         }
78
79         if (lcfg->lcfg_inllen1 > 37) {
80                 CERROR("target UUID must be less than 38 characters\n");
81                 RETURN(-EINVAL);
82         }
83
84         if (lcfg->lcfg_inllen2 < 1) {
85                 CERROR("setup requires a SERVER UUID\n");
86                 RETURN(-EINVAL);
87         }
88
89         if (lcfg->lcfg_inllen2 > 37) {
90                 CERROR("server UUID must be less than 38 characters\n");
91                 RETURN(-EINVAL);
92         }
93
94         sema_init(&cli->cl_sem, 1);
95         cli->cl_conn_count = 0;
96         memcpy(server_uuid.uuid, lcfg->lcfg_inlbuf2, MIN(lcfg->lcfg_inllen2,
97                                                         sizeof(server_uuid)));
98
99         init_MUTEX(&cli->cl_dirty_sem);
100         cli->cl_dirty = 0;
101         cli->cl_dirty_granted = 0;
102         cli->cl_dirty_max = 64*1024*1024; /* some default */
103         cli->cl_ost_can_grant = 1;
104         INIT_LIST_HEAD(&cli->cl_cache_waiters);
105         INIT_LIST_HEAD(&cli->cl_loi_ready_list);
106         spin_lock_init(&cli->cl_loi_list_lock);
107         cli->cl_brw_in_flight = 0;
108         spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
109         spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
110         spin_lock_init(&cli->cl_read_page_hist.oh_lock);
111         spin_lock_init(&cli->cl_write_page_hist.oh_lock);
112         cli->cl_max_pages_per_rpc = PTL_MD_MAX_PAGES;
113         cli->cl_max_rpcs_in_flight = 8;
114
115         rc = ldlm_get_ref();
116         if (rc) {
117                 CERROR("ldlm_get_ref failed: %d\n", rc);
118                 GOTO(err, rc);
119         }
120
121         conn = ptlrpc_uuid_to_connection(&server_uuid);
122         if (conn == NULL)
123                 GOTO(err_ldlm, rc = -ENOENT);
124
125         ptlrpc_init_client(rq_portal, rp_portal, name,
126                            &obddev->obd_ldlm_client);
127
128         imp = class_new_import();
129         if (imp == NULL) {
130                 ptlrpc_put_connection(conn);
131                 GOTO(err_ldlm, rc = -ENOENT);
132         }
133         imp->imp_connection = conn;
134         imp->imp_client = &obddev->obd_ldlm_client;
135         imp->imp_obd = obddev;
136         imp->imp_connect_op = connect_op;
137         imp->imp_generation = 0;
138         INIT_LIST_HEAD(&imp->imp_pinger_chain);
139         memcpy(imp->imp_target_uuid.uuid, lcfg->lcfg_inlbuf1,
140               lcfg->lcfg_inllen1);
141         class_import_put(imp);
142
143         cli->cl_import = imp;
144         cli->cl_max_mds_easize = sizeof(struct lov_mds_md);
145         cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
146         cli->cl_sandev = to_kdev_t(0);
147
148         if (lcfg->lcfg_inllen3 != 0) {
149                 if (!strcmp(lcfg->lcfg_inlbuf3, "inactive")) {
150                         CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
151                                name, obddev->obd_name,
152                                imp->imp_target_uuid.uuid);
153                         imp->imp_invalid = 1;
154
155                         if (lcfg->lcfg_inllen4 != 0)
156                                 mgmt_name = lcfg->lcfg_inlbuf4;
157                 } else {
158                         mgmt_name = lcfg->lcfg_inlbuf3;
159                 }
160         }
161
162         if (mgmt_name != NULL) {
163                 /* Register with management client if we need to. */
164                 CDEBUG(D_HA, "%s registering with %s for events about %s\n",
165                        obddev->obd_name, mgmt_name, server_uuid.uuid);
166
167                 mgmt_obd = class_name2obd(mgmt_name);
168                 if (!mgmt_obd) {
169                         CERROR("can't find mgmtcli %s to register\n",
170                                mgmt_name);
171                         GOTO(err_import, rc = -ENOSYS);
172                 }
173
174                 register_f = inter_module_get("mgmtcli_register_for_events");
175                 if (!register_f) {
176                         CERROR("can't i_m_g mgmtcli_register_for_events\n");
177                         GOTO(err_import, rc = -ENOSYS);
178                 }
179
180                 rc = register_f(mgmt_obd, obddev, &imp->imp_target_uuid);
181                 inter_module_put("mgmtcli_register_for_events");
182
183                 if (!rc)
184                         cli->cl_mgmtcli_obd = mgmt_obd;
185         }
186
187         RETURN(rc);
188
189 err_import:
190         class_destroy_import(imp);
191 err_ldlm:
192         ldlm_put_ref(0);
193 err:
194         RETURN(rc);
195
196 }
197
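/* Tear down what client_obd_setup() created: deregister from the management
 * client if one was registered, destroy the import, and drop the LDLM
 * reference (forcibly if OBD_OPT_FORCE is set in flags). */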
198 int client_obd_cleanup(struct obd_device *obddev, int flags)
199 {
200         struct client_obd *cli = &obddev->u.cli;
201
202         if (!cli->cl_import)
203                 RETURN(-EINVAL);
204         if (cli->cl_mgmtcli_obd) {
205                 mgmtcli_deregister_for_events_t dereg_f;
206
207                 dereg_f = inter_module_get("mgmtcli_deregister_for_events");
208                 dereg_f(cli->cl_mgmtcli_obd, obddev);
209                 inter_module_put("mgmtcli_deregister_for_events");
210         }
211         class_destroy_import(cli->cl_import);
212         cli->cl_import = NULL;
213
214         ldlm_put_ref(flags & OBD_OPT_FORCE);
215
216         RETURN(0);
217 }
218
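/* Connect a client OBD to its target.  Only the first connection does real
 * work: it creates the client-side LDLM namespace, performs the connect RPC
 * via ptlrpc_connect_import(), and adds replayable imports to the pinger.
 * Subsequent connections just bump cl_conn_count under cl_sem. */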
219 int client_connect_import(struct lustre_handle *dlm_handle,
220                           struct obd_device *obd,
221                           struct obd_uuid *cluuid)
222 {
223         struct client_obd *cli = &obd->u.cli;
224         struct obd_import *imp = cli->cl_import;
225         struct obd_export *exp;
226         int rc;
227         ENTRY;
228
229         down(&cli->cl_sem);
230         rc = class_connect(dlm_handle, obd, cluuid);
231         if (rc)
232                 GOTO(out_sem, rc);
233
234         cli->cl_conn_count++;
235         if (cli->cl_conn_count > 1)
236                 GOTO(out_sem, rc);
237         exp = class_conn2export(dlm_handle);
238
239         if (obd->obd_namespace != NULL)
240                 CERROR("already have namespace!\n");
241         obd->obd_namespace = ldlm_namespace_new(obd->obd_name,
242                                                 LDLM_NAMESPACE_CLIENT);
243         if (obd->obd_namespace == NULL)
244                 GOTO(out_disco, rc = -ENOMEM);
245
246         imp->imp_dlm_handle = *dlm_handle;
247         imp->imp_state = LUSTRE_IMP_DISCON;
248
249         rc = ptlrpc_connect_import(imp);
250         if (rc != 0) {
251                 LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
252                 GOTO(out_ldlm, rc);
253         }
254
255         LASSERT (imp->imp_state == LUSTRE_IMP_FULL);
256
257         exp->exp_connection = ptlrpc_connection_addref(imp->imp_connection);
258
259         if (imp->imp_replayable) {
260                 CDEBUG(D_HA, "connected to replayable target: %s\n",
261                        imp->imp_target_uuid.uuid);
262                 ptlrpc_pinger_add_import(imp);
263         }
264
265         CDEBUG(D_HA, "local import: %p, remote handle: "LPX64"\n", imp,
266                imp->imp_remote_handle.cookie);
267
268         EXIT;
269
270         if (rc) {
271 out_ldlm:
272                 ldlm_namespace_free(obd->obd_namespace, 0);
273                 obd->obd_namespace = NULL;
274 out_disco:
275                 cli->cl_conn_count--;
276                 class_disconnect(exp, 0);
277         } else {
278                 class_export_put(exp);
279         }
280 out_sem:
281         up(&cli->cl_sem);
282         return rc;
283 }
284
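/* Undo client_connect_import().  The final disconnect cancels unused locks,
 * frees the namespace, removes the import from the pinger, and either marks
 * the import inactive (forced / obd_no_recov shutdown) or sends a disconnect
 * RPC to the target. */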
285 int client_disconnect_export(struct obd_export *exp, int failover)
286 {
287         struct obd_device *obd = class_exp2obd(exp);
288         struct client_obd *cli;
289         struct obd_import *imp;
290         int rc = 0, err;
291         ENTRY;
292
293         if (!obd) {
294                 CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
295                        exp, exp ? exp->exp_handle.h_cookie : -1);
296                 RETURN(-EINVAL);
297         }
        cli = &obd->u.cli;
        imp = cli->cl_import;
298
299         down(&cli->cl_sem);
300         if (!cli->cl_conn_count) {
301                 CERROR("disconnecting disconnected device (%s)\n",
302                        obd->obd_name);
303                 GOTO(out_sem, rc = -EINVAL);
304         }
305
306         cli->cl_conn_count--;
307         if (cli->cl_conn_count)
308                 GOTO(out_no_disconnect, rc = 0);
309
310         /* Some non-replayable imports (MDS's OSCs) are pinged, so just
311          * delete it regardless.  (It's safe to delete an import that was
312          * never added.) */
313         (void)ptlrpc_pinger_del_import(imp);
314
315         if (obd->obd_namespace != NULL) {
316                 /* obd_no_recov == local only */
317                 ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
318                                        obd->obd_no_recov, NULL);
319                 ldlm_namespace_free(obd->obd_namespace, obd->obd_no_recov);
320                 obd->obd_namespace = NULL;
321         }
322
323         /* Yeah, obd_no_recov also (mainly) means "forced shutdown". */
324         if (obd->obd_no_recov)
325                 ptlrpc_set_import_active(imp, 0);
326         else
327                 rc = ptlrpc_disconnect_import(imp);
328
329         imp->imp_state = LUSTRE_IMP_NEW;
330
331         EXIT;
332  out_no_disconnect:
333         err = class_disconnect(exp, 0);
334         if (!rc && err)
335                 rc = err;
336  out_sem:
337         up(&cli->cl_sem);
338         RETURN(rc);
339 }
340
341 /* --------------------------------------------------------------------------
342  * from old lib/target.c
343  * -------------------------------------------------------------------------- */
344
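/* Decide whether an incoming connect from a known client UUID is a clean
 * reconnect (same remote handle cookie: return EALREADY and hand back our
 * export handle) or a conflicting one (different cookie: zero the handle
 * and return -EALREADY). */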
345 int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
346                             struct obd_uuid *cluuid)
347 {
348         if (exp->exp_connection) {
349                 struct lustre_handle *hdl;
350                 hdl = &exp->exp_imp_reverse->imp_remote_handle;
351                 /* Might be a re-connect after a partition. */
352                 if (!memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
353                         CERROR("%s reconnecting\n", cluuid->uuid);
354                         conn->cookie = exp->exp_handle.h_cookie;
355                         RETURN(EALREADY);
356                 } else {
357                         CERROR("%s reconnecting from %s, "
358                                "handle mismatch (ours "LPX64", theirs "
359                                LPX64")\n", cluuid->uuid,
360                                exp->exp_connection->c_remote_uuid.uuid,
361                                hdl->cookie, conn->cookie);
362                         memset(conn, 0, sizeof *conn);
363                         RETURN(-EALREADY);
364                 }
365         }
366
367         conn->cookie = exp->exp_handle.h_cookie;
368         CDEBUG(D_INFO, "existing export for UUID '%s' at %p\n",
369                cluuid->uuid, exp);
370         CDEBUG(D_IOCTL,"connect: cookie "LPX64"\n", conn->cookie);
371         RETURN(0);
372 }
373
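/* Server-side connect handler.  Looks up the target OBD from the request's
 * target UUID, matches the client UUID against existing exports (possibly
 * reconnecting via target_handle_reconnect), refuses new clients while
 * recovery is in progress, and builds the reverse import used to send
 * callbacks back to the client. */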
374 int target_handle_connect(struct ptlrpc_request *req, svc_handler_t handler)
375 {
376         struct obd_device *target;
377         struct obd_export *export = NULL;
378         struct obd_import *revimp;
379         struct lustre_handle conn;
380         struct obd_uuid tgtuuid;
381         struct obd_uuid cluuid;
382         struct obd_uuid remote_uuid;
383         struct list_head *p;
384         char *str, *tmp;
385         int rc = 0, abort_recovery;
386         ENTRY;
387
388         LASSERT_REQSWAB (req, 0);
389         str = lustre_msg_string(req->rq_reqmsg, 0, sizeof(tgtuuid) - 1);
390         if (str == NULL) {
391                 CERROR("bad target UUID for connect\n");
392                 GOTO(out, rc = -EINVAL);
393         }
394
395         obd_str2uuid (&tgtuuid, str);
396         target = class_uuid2obd(&tgtuuid);
397         if (!target) {
398                 target = class_name2obd(str);
399         }
400
401         if (!target || target->obd_stopping || !target->obd_set_up) {
402                 CERROR("UUID '%s' is not available for connect\n", str);
403                 GOTO(out, rc = -ENODEV);
404         }
405
406         LASSERT_REQSWAB (req, 1);
407         str = lustre_msg_string(req->rq_reqmsg, 1, sizeof(cluuid) - 1);
408         if (str == NULL) {
409                 CERROR("bad client UUID for connect\n");
410                 GOTO(out, rc = -EINVAL);
411         }
412
413         obd_str2uuid (&cluuid, str);
414
415         /* XXX extract a nettype and format accordingly */
416         snprintf(remote_uuid.uuid, sizeof remote_uuid,
417                  "NET_"LPX64"_UUID", req->rq_peer.peer_nid);
418
419         spin_lock_bh(&target->obd_processing_task_lock);
420         abort_recovery = target->obd_abort_recovery;
421         spin_unlock_bh(&target->obd_processing_task_lock);
422         if (abort_recovery)
423                 target_abort_recovery(target);
424
425         tmp = lustre_msg_buf(req->rq_reqmsg, 2, sizeof conn);
426         if (tmp == NULL)
427                 GOTO(out, rc = -EPROTO);
428
429         memcpy(&conn, tmp, sizeof conn);
430
431         rc = lustre_pack_reply(req, 0, NULL, NULL);
432         if (rc)
433                 GOTO(out, rc);
434
435         /* lctl gets a backstage, all-access pass. */
436         if (obd_uuid_equals(&cluuid, &target->obd_uuid))
437                 goto dont_check_exports;
438
439         spin_lock(&target->obd_dev_lock);
440         list_for_each(p, &target->obd_exports) {
441                 export = list_entry(p, struct obd_export, exp_obd_chain);
442                 if (obd_uuid_equals(&cluuid, &export->exp_client_uuid)) {
443                         spin_unlock(&target->obd_dev_lock);
444                         LASSERT(export->exp_obd == target);
445
446                         rc = target_handle_reconnect(&conn, export, &cluuid);
447                         break;
448                 }
449                 export = NULL;
450         }
451         /* If we found an export, we already unlocked. */
452         if (!export) {
453                 spin_unlock(&target->obd_dev_lock);
454         } else if (req->rq_reqmsg->conn_cnt == 1) {
455                 CERROR("%s reconnected with 1 conn_cnt; cookies not random?\n",
456                        cluuid.uuid);
457                 GOTO(out, rc = -EALREADY);
458         }
459
460         /* Tell the client if we're in recovery. */
461         /* If this is the first client, start the recovery timer */
462         if (target->obd_recovering) {
463                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECOVERING);
464                 target_start_recovery_timer(target, handler);
465         }
466
467         /* Tell the client if we support replayable requests */
468         if (target->obd_replayable)
469                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
470
471         if (export == NULL) {
472                 if (target->obd_recovering) {
473                         CERROR("denying connection for new client %s: "
474                                "%d clients in recovery for %lds\n", cluuid.uuid,
475                                target->obd_recoverable_clients,
476                                (target->obd_recovery_timer.expires-jiffies)/HZ);
477                         rc = -EBUSY;
478                 } else {
479  dont_check_exports:
480                         rc = obd_connect(&conn, target, &cluuid);
481                 }
482         }
483
484         /* If all else goes well, this is our RPC return code. */
485         req->rq_status = 0;
486
487         if (rc && rc != EALREADY)
488                 GOTO(out, rc);
489
490         /* XXX track this all the time? */
491         if (target->obd_recovering) {
492                 target->obd_connected_clients++;
493         }
494
495         req->rq_repmsg->handle = conn;
496
497         /* If the client and the server are the same node, we will already
498          * have an export that really points to the client's DLM export,
499          * because we have a shared handles table.
500          *
501          * XXX this will go away when shaver stops sending the "connect" handle
502          * in the real "remote handle" field of the request --phik 24 Apr 2003
503          */
504         if (req->rq_export != NULL)
505                 class_export_put(req->rq_export);
506
507         /* ownership of this export ref transfers to the request */
508         export = req->rq_export = class_conn2export(&conn);
509         LASSERT(export != NULL);
510
511         if (req->rq_connection != NULL)
512                 ptlrpc_put_connection(req->rq_connection);
513         if (export->exp_connection != NULL)
514                 ptlrpc_put_connection(export->exp_connection);
515         export->exp_connection = ptlrpc_get_connection(&req->rq_peer,
516                                                        &remote_uuid);
517         req->rq_connection = ptlrpc_connection_addref(export->exp_connection);
518
519         LASSERT(export->exp_conn_cnt < req->rq_reqmsg->conn_cnt);
520         export->exp_conn_cnt = req->rq_reqmsg->conn_cnt;
521
522         if (rc == EALREADY) {
523                 /* We indicate the reconnection in a flag, not an error code. */
524                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECONNECT);
525                 GOTO(out, rc = 0);
526         }
527
528         memcpy(&conn, lustre_msg_buf(req->rq_reqmsg, 2, sizeof conn),
529                sizeof conn);
530
531         if (export->exp_imp_reverse != NULL)
532                 class_destroy_import(export->exp_imp_reverse);
533         revimp = export->exp_imp_reverse = class_new_import();
534         revimp->imp_connection = ptlrpc_connection_addref(req->rq_connection);
535         revimp->imp_client = &export->exp_obd->obd_ldlm_client;
536         revimp->imp_remote_handle = conn;
537         revimp->imp_obd = target;
538         revimp->imp_dlm_fake = 1;
539         revimp->imp_state = LUSTRE_IMP_FULL;
540         class_import_put(revimp);
541 out:
542         if (rc)
543                 req->rq_status = rc;
544         RETURN(rc);
545 }
546
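/* Server-side disconnect handler: pack an empty reply and drop the export's
 * reference via obd_disconnect(). */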
547 int target_handle_disconnect(struct ptlrpc_request *req)
548 {
549         int rc;
550         ENTRY;
551
552         rc = lustre_pack_reply(req, 0, NULL, NULL);
553         if (rc)
554                 RETURN(rc);
555
556         req->rq_status = obd_disconnect(req->rq_export, 0);
557         req->rq_export = NULL;
558         RETURN(0);
559 }
560
561 void target_destroy_export(struct obd_export *exp)
562 {
563         /* exports created from last_rcvd data, and "fake"
564            exports created by lctl don't have an import */
565         if (exp->exp_imp_reverse != NULL)
566                 class_destroy_import(exp->exp_imp_reverse);
567
568         /* We cancel locks at disconnect time, but this will catch any locks
569          * granted in a race with recovery-induced disconnect. */
570         ldlm_cancel_locks_for_export(exp);
571 }
572
573 /*
574  * Recovery functions
575  */
576
577 void target_cancel_recovery_timer(struct obd_device *obd)
578 {
579         del_timer(&obd->obd_recovery_timer);
580 }
581
582 static void abort_delayed_replies(struct obd_device *obd)
583 {
584         struct ptlrpc_request *req;
585         struct list_head *tmp, *n;
586         list_for_each_safe(tmp, n, &obd->obd_delayed_reply_queue) {
587                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
588                 DEBUG_REQ(D_ERROR, req, "aborted:");
589                 req->rq_status = -ENOTCONN;
590                 req->rq_type = PTL_RPC_MSG_ERR;
591                 ptlrpc_reply(req);
592                 list_del(&req->rq_list);
593                 OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
594                 OBD_FREE(req, sizeof *req);
595         }
596 }
597
598 static void abort_recovery_queue(struct obd_device *obd)
599 {
600         struct ptlrpc_request *req;
601         struct list_head *tmp, *n;
602         int rc;
603
604         list_for_each_safe(tmp, n, &obd->obd_recovery_queue) {
605                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
606                 DEBUG_REQ(D_ERROR, req, "aborted:");
607                 req->rq_status = -ENOTCONN;
608                 req->rq_type = PTL_RPC_MSG_ERR;
609                 rc = lustre_pack_reply(req, 0, NULL, NULL);
610                 if (rc == 0) {
611                         ptlrpc_reply(req);
612                 } else {
613                         DEBUG_REQ(D_ERROR, req,
614                                   "packing failed for abort-reply; skipping");
615                 }
616                 list_del(&req->rq_list);
617                 class_export_put(req->rq_export);
618                 OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
619                 OBD_FREE(req, sizeof *req);
620         }
621 }
622
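/* Abandon recovery: clear the recovery flags, stop the timer, disconnect all
 * exports, run the post-recovery (orphan cleanup) hook if the OBD has one,
 * and fail any queued delayed replies and replay requests with -ENOTCONN. */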
623 void target_abort_recovery(void *data)
624 {
625         struct obd_device *obd = data;
626         int rc;
627
628         CERROR("disconnecting clients and aborting recovery\n");
629         spin_lock_bh(&obd->obd_processing_task_lock);
630         if (!obd->obd_recovering) {
631                 spin_unlock_bh(&obd->obd_processing_task_lock);
632                 EXIT;
633                 return;
634         }
635
636         obd->obd_recovering = obd->obd_abort_recovery = 0;
637
638         wake_up(&obd->obd_next_transno_waitq);
639         target_cancel_recovery_timer(obd);
640         spin_unlock_bh(&obd->obd_processing_task_lock);
641
642         class_disconnect_exports(obd, 0);
643
644         /* when recovery was aborted, clean up orphans for the MDS */
645         if (OBT(obd) && OBP(obd, postrecov)) {
646                 rc = OBP(obd, postrecov)(obd);
647                 if (rc >= 0)
648                         CWARN("Cleaned up %d orphans after aborted recovery\n", rc);
649                 else
650                         CERROR("postrecov failed %d\n", rc);
651         }
652
653         abort_delayed_replies(obd);
654         abort_recovery_queue(obd);
655         ptlrpc_run_recovery_over_upcall(obd);
656 }
657
658 static void target_recovery_expired(unsigned long castmeharder)
659 {
660         struct obd_device *obd = (struct obd_device *)castmeharder;
661         CERROR("recovery timed out, aborting\n");
662         spin_lock_bh(&obd->obd_processing_task_lock);
663         obd->obd_abort_recovery = 1;
664         wake_up(&obd->obd_next_transno_waitq);
665         spin_unlock_bh(&obd->obd_processing_task_lock);
666 }
667
668 static void reset_recovery_timer(struct obd_device *obd)
669 {
670         int recovering;
671         spin_lock(&obd->obd_dev_lock);
672         recovering = obd->obd_recovering;
673         spin_unlock(&obd->obd_dev_lock);
674
675         if (!recovering)
676                 return;
677         CDEBUG(D_HA, "timer will expire in %u seconds\n",
678                OBD_RECOVERY_TIMEOUT / HZ);
679         mod_timer(&obd->obd_recovery_timer, jiffies + OBD_RECOVERY_TIMEOUT);
680 }
681
682
683 /* Only start it the first time called */
684 void target_start_recovery_timer(struct obd_device *obd, svc_handler_t handler)
685 {
686         spin_lock_bh(&obd->obd_processing_task_lock);
687         if (obd->obd_recovery_handler) {
688                 spin_unlock_bh(&obd->obd_processing_task_lock);
689                 return;
690         }
691         CWARN("%s: starting recovery timer (%us)\n", obd->obd_name,
692                OBD_RECOVERY_TIMEOUT / HZ);
693         obd->obd_recovery_handler = handler;
694         obd->obd_recovery_timer.function = target_recovery_expired;
695         obd->obd_recovery_timer.data = (unsigned long)obd;
696         init_timer(&obd->obd_recovery_timer);
697         spin_unlock_bh(&obd->obd_processing_task_lock);
698
699         reset_recovery_timer(obd);
700 }
701
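/* Wake-up predicate for the recovery queue: returns non-zero when recovery
 * was aborted or has completed, when the head of the queue carries the next
 * expected transno, or when the number of queued requests plus completed
 * clients reaches the number of recoverable clients, in which case the
 * expected transno is skipped forward to the one at the head of the queue. */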
702 static int check_for_next_transno(struct obd_device *obd)
703 {
704         struct ptlrpc_request *req;
705         int wake_up = 0, connected, completed, queue_len, max;
706         __u64 next_transno, req_transno;
707
708         spin_lock_bh(&obd->obd_processing_task_lock);
709         req = list_entry(obd->obd_recovery_queue.next,
710                          struct ptlrpc_request, rq_list);
711         max = obd->obd_max_recoverable_clients;
712         req_transno = req->rq_reqmsg->transno;
713         connected = obd->obd_connected_clients;
714         completed = max - obd->obd_recoverable_clients;
715         queue_len = obd->obd_requests_queued_for_recovery;
716         next_transno = obd->obd_next_recovery_transno;
717
718         if (obd->obd_abort_recovery) {
719                 CDEBUG(D_HA, "waking for aborted recovery\n");
720                 wake_up = 1;
721         } else if (!obd->obd_recovering) {
722                 CDEBUG(D_HA, "waking for completed recovery (?)\n");
723                 wake_up = 1;
724         } else if (req_transno == next_transno) {
725                 CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
726                 wake_up = 1;
727         } else if (queue_len + completed == max) {
728                 CDEBUG(D_ERROR,
729                        "waking for skipped transno (skip: "LPD64
730                        ", ql: %d, comp: %d, conn: %d, next: "LPD64")\n",
731                        next_transno, queue_len, completed, connected, req_transno);
732                 obd->obd_next_recovery_transno = req_transno;
733                 wake_up = 1;
734         }
735         spin_unlock_bh(&obd->obd_processing_task_lock);
736         LASSERT(req->rq_reqmsg->transno >= next_transno);
737         return wake_up;
738 }
739
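/* Replay queued requests in transno order.  Runs on the service thread that
 * owns obd_processing_task; it sleeps on obd_next_transno_waitq until the
 * head of the queue matches obd_next_recovery_transno, handles the request,
 * frees the saved copy, and advances the expected transno until the queue
 * is empty. */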
740 static void process_recovery_queue(struct obd_device *obd)
741 {
742         struct ptlrpc_request *req;
743         int abort_recovery = 0;
744         struct l_wait_info lwi = { 0 };
745         ENTRY;
746
747         for (;;) {
748                 spin_lock_bh(&obd->obd_processing_task_lock);
749                 LASSERT(obd->obd_processing_task == current->pid);
750                 req = list_entry(obd->obd_recovery_queue.next,
751                                  struct ptlrpc_request, rq_list);
752
753                 if (req->rq_reqmsg->transno != obd->obd_next_recovery_transno) {
754                         spin_unlock_bh(&obd->obd_processing_task_lock);
755                         CDEBUG(D_HA, "Waiting for transno "LPD64" (1st is "
756                                LPD64")\n",
757                                obd->obd_next_recovery_transno,
758                                req->rq_reqmsg->transno);
759                         l_wait_event(obd->obd_next_transno_waitq,
760                                      check_for_next_transno(obd), &lwi);
761                         spin_lock_bh(&obd->obd_processing_task_lock);
762                         abort_recovery = obd->obd_abort_recovery;
763                         spin_unlock_bh(&obd->obd_processing_task_lock);
764                         if (abort_recovery) {
765                                 target_abort_recovery(obd);
766                                 return;
767                         }
768                         continue;
769                 }
770                 list_del_init(&req->rq_list);
771                 obd->obd_requests_queued_for_recovery--;
772                 spin_unlock_bh(&obd->obd_processing_task_lock);
773
774                 DEBUG_REQ(D_HA, req, "processing: ");
775                 (void)obd->obd_recovery_handler(req);
776                 obd->obd_replayed_requests++;
777                 reset_recovery_timer(obd);
778                 /* bug 1580: decide how to properly sync() in recovery */
779                 //mds_fsync_super(mds->mds_sb);
780                 class_export_put(req->rq_export);
781                 OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
782                 OBD_FREE(req, sizeof *req);
783                 spin_lock_bh(&obd->obd_processing_task_lock);
784                 obd->obd_next_recovery_transno++;
785                 if (list_empty(&obd->obd_recovery_queue)) {
786                         obd->obd_processing_task = 0;
787                         spin_unlock_bh(&obd->obd_processing_task_lock);
788                         break;
789                 }
790                 spin_unlock_bh(&obd->obd_processing_task_lock);
791         }
792         EXIT;
793 }
794
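/* Queue a replayed request for ordered processing during recovery.  Requests
 * with no transno, with a transno below the one currently expected, or
 * arriving on the processing thread itself are handled immediately (return
 * 1); otherwise a private copy of the request and its message is inserted
 * into obd_recovery_queue in transno order and the queue runner is woken or
 * started (return 0). */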
795 int target_queue_recovery_request(struct ptlrpc_request *req,
796                                   struct obd_device *obd)
797 {
798         struct list_head *tmp;
799         int inserted = 0;
800         __u64 transno = req->rq_reqmsg->transno;
801         struct ptlrpc_request *saved_req;
802         struct lustre_msg *reqmsg;
803
804         /* CAVEAT EMPTOR: The incoming request message has been swabbed
805          * (i.e. buflens etc are in my own byte order), but type-dependent
806          * buffers (eg mds_body, ost_body etc) have NOT been swabbed. */
807
808         if (!transno) {
809                 INIT_LIST_HEAD(&req->rq_list);
810                 DEBUG_REQ(D_HA, req, "not queueing");
811                 return 1;
812         }
813
814         /* XXX If I were a real man, these LBUGs would be sane cleanups. */
815         /* XXX just like the request-dup code in queue_final_reply */
816         OBD_ALLOC(saved_req, sizeof *saved_req);
817         if (!saved_req)
818                 LBUG();
819         OBD_ALLOC(reqmsg, req->rq_reqlen);
820         if (!reqmsg)
821                 LBUG();
822
823         spin_lock_bh(&obd->obd_processing_task_lock);
824
825         /* If we're processing the queue, we don't want to queue this
826          * message.
827          *
828          * Also, if this request has a transno less than the one we're waiting
829          * for, we should process it now.  It could (and currently always will)
830          * be an open request for a descriptor that was opened some time ago.
831          */
832         if (obd->obd_processing_task == current->pid ||
833             transno < obd->obd_next_recovery_transno) {
834                 /* Processing the queue right now, don't re-add. */
835                 LASSERT(list_empty(&req->rq_list));
836                 spin_unlock_bh(&obd->obd_processing_task_lock);
837                 OBD_FREE(reqmsg, req->rq_reqlen);
838                 OBD_FREE(saved_req, sizeof *saved_req);
839                 return 1;
840         }
841
842         memcpy(saved_req, req, sizeof *req);
843         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
844         req = saved_req;
845         req->rq_reqmsg = reqmsg;
846         class_export_get(req->rq_export);
847         INIT_LIST_HEAD(&req->rq_list);
848
849         /* XXX O(n^2) */
850         list_for_each(tmp, &obd->obd_recovery_queue) {
851                 struct ptlrpc_request *reqiter =
852                         list_entry(tmp, struct ptlrpc_request, rq_list);
853
854                 if (reqiter->rq_reqmsg->transno > transno) {
855                         list_add_tail(&req->rq_list, &reqiter->rq_list);
856                         inserted = 1;
857                         break;
858                 }
859         }
860
861         if (!inserted) {
862                 list_add_tail(&req->rq_list, &obd->obd_recovery_queue);
863         }
864
865         obd->obd_requests_queued_for_recovery++;
866
867         if (obd->obd_processing_task != 0) {
868                 /* Someone else is processing this queue, we'll leave it to
869                  * them.
870                  */
871                 wake_up(&obd->obd_next_transno_waitq);
872                 spin_unlock_bh(&obd->obd_processing_task_lock);
873                 return 0;
874         }
875
876         /* Nobody is processing, and we know there's (at least) one to process
877          * now, so we'll do the honours.
878          */
879         obd->obd_processing_task = current->pid;
880         spin_unlock_bh(&obd->obd_processing_task_lock);
881
882         process_recovery_queue(obd);
883         return 0;
884 }
885
886 struct obd_device * target_req2obd(struct ptlrpc_request *req)
887 {
888         return req->rq_export->exp_obd;
889 }
890
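/* Hold back the reply to a client's final recovery request.  The reply is
 * copied onto obd_delayed_reply_queue; once the last recoverable client has
 * checked in, locks are reprocessed, the post-recovery hook runs, all the
 * delayed replies are sent, and the recovery timer is cancelled. */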
891 int target_queue_final_reply(struct ptlrpc_request *req, int rc)
892 {
893         struct obd_device *obd = target_req2obd(req);
894         struct ptlrpc_request *saved_req;
895         struct lustre_msg *reqmsg;
896         int recovery_done = 0;
897         int rc2;
898
899         if (rc) {
900                 /* Just like ptlrpc_error, but without the sending. */
901                 rc = lustre_pack_reply(req, 0, NULL, NULL);
902                 LASSERT(rc == 0); /* XXX handle this */
903                 req->rq_type = PTL_RPC_MSG_ERR;
904         }
905
906         LASSERT(list_empty(&req->rq_list));
907         /* XXX a bit like the request-dup code in queue_recovery_request */
908         OBD_ALLOC(saved_req, sizeof *saved_req);
909         if (!saved_req)
910                 LBUG();
911         OBD_ALLOC(reqmsg, req->rq_reqlen);
912         if (!reqmsg)
913                 LBUG();
914         memcpy(saved_req, req, sizeof *saved_req);
915         memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
916         req = saved_req;
917         req->rq_reqmsg = reqmsg;
918         list_add(&req->rq_list, &obd->obd_delayed_reply_queue);
919
920         spin_lock_bh(&obd->obd_processing_task_lock);
921         --obd->obd_recoverable_clients;
922         recovery_done = (obd->obd_recoverable_clients == 0);
923         spin_unlock_bh(&obd->obd_processing_task_lock);
924
925         if (recovery_done) {
926                 struct list_head *tmp, *n;
927                 ldlm_reprocess_all_ns(req->rq_export->exp_obd->obd_namespace);
928                 CWARN("%s: all clients recovered, sending delayed replies\n",
929                        obd->obd_name);
930                 obd->obd_recovering = 0;
931
932                 /* when recovery has finished, clean up orphans for the MDS */
933                 if (OBT(obd) && OBP(obd, postrecov)) {
934                         rc2 = OBP(obd, postrecov)(obd);
935                         if (rc2 >= 0)
936                                 CWARN("%s: all clients recovered, %d MDS orphans "
937                                        "deleted\n", obd->obd_name, rc2);
938                         else
939                                 CERROR("postrecov failed %d\n", rc2);
940                 }
941
942                 list_for_each_safe(tmp, n, &obd->obd_delayed_reply_queue) {
943                         req = list_entry(tmp, struct ptlrpc_request, rq_list);
944                         DEBUG_REQ(D_ERROR, req, "delayed:");
945                         ptlrpc_reply(req);
946                         list_del(&req->rq_list);
947                         OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
948                         OBD_FREE(req, sizeof *req);
949                 }
950                 target_cancel_recovery_timer(obd);
951         } else {
952                 CERROR("%s: %d recoverable clients remain\n",
953                        obd->obd_name, obd->obd_recoverable_clients);
954                 wake_up(&obd->obd_next_transno_waitq);
955         }
956
957         return 1;
958 }
959
960 static void ptlrpc_abort_reply (struct ptlrpc_request *req)
961 {
962         /* On return, we must be sure that the ACK callback has either
963          * happened or will not happen.  Note that the SENT callback will
964          * happen come what may since we successfully posted the PUT. */
965         int rc;
966         struct l_wait_info lwi;
967         unsigned long flags;
968
969  again:
970         /* serialise with ACK callback */
971         spin_lock_irqsave (&req->rq_lock, flags);
972         if (!req->rq_want_ack) {
973                 spin_unlock_irqrestore (&req->rq_lock, flags);
974                 /* The ACK callback has happened already.  Although the
975                  * SENT callback might still be outstanding (yes really) we
976                  * don't care; this is just like normal completion. */
977                 return;
978         }
979         spin_unlock_irqrestore (&req->rq_lock, flags);
980
981         /* Have a bash at unlinking the MD.  This will fail until the SENT
982          * callback has happened since the MD is busy from the PUT.  If the
983          * ACK still hasn't arrived after then, a successful unlink will
984          * ensure the ACK callback never happens. */
985         rc = PtlMDUnlink (req->rq_reply_md_h);
986         switch (rc) {
987         default:
988                 LBUG ();
989         case PTL_OK:
990                 /* SENT callback happened; ACK callback preempted */
991                 LASSERT (req->rq_want_ack);
992                 spin_lock_irqsave (&req->rq_lock, flags);
993                 req->rq_want_ack = 0;
994                 spin_unlock_irqrestore (&req->rq_lock, flags);
995                 return;
996         case PTL_INV_MD:
997                 return;
998         case PTL_MD_INUSE:
999                 /* Still sending or ACK callback in progress: wait until
1000                  * either callback has completed and try again.
1001                  * Actually we can't wait for the SENT callback because
1002                  * there's no state the SENT callback can touch that will
1003                  * allow it to communicate with us!  So we just wait here
1004                  * for a short time, effectively polling for the SENT
1005                  * callback by calling PtlMDUnlink() again, to see if it
1006                  * has finished.  Note that if the ACK does arrive, its
1007                  * callback wakes us in short order. --eeb */
1008                 lwi = LWI_TIMEOUT (HZ/4, NULL, NULL);
1009                 rc = l_wait_event(req->rq_reply_waitq, !req->rq_want_ack,
1010                                   &lwi);
1011                 CDEBUG (D_HA, "Retrying req %p: %d\n", req, rc);
1012                 /* NB go back and test rq_want_ack with locking, to ensure
1013                  * if the ACK callback happened, it has completed and stopped
1014                  * referencing this req. */
1015                 goto again;
1016         }
1017 }
1018
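/* Send (or deliberately drop, under fail_id) the reply for a request that
 * may hold ack locks.  If the request pinned any locks, wait until the
 * client acks the reply, the request is resent, or the transno commits to
 * disk, then release the ack locks. */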
1019 void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
1020 {
1021         int i;
1022         int netrc;
1023         unsigned long flags;
1024         struct ptlrpc_req_ack_lock *ack_lock;
1025         struct l_wait_info lwi = { 0 };
1026         wait_queue_t commit_wait;
1027         struct obd_device *obd =
1028                 req->rq_export ? req->rq_export->exp_obd : NULL;
1029         struct obd_export *exp = NULL;
1030
1031         if (req->rq_export) {
1032                 for (i = 0; i < REQ_MAX_ACK_LOCKS; i++) {
1033                         if (req->rq_ack_locks[i].mode) {
1034                                 exp = req->rq_export;
1035                                 break;
1036                         }
1037                 }
1038         }
1039
1040         if (exp) {
1041                 exp->exp_outstanding_reply = req;
1042                 spin_lock_irqsave (&req->rq_lock, flags);
1043                 req->rq_want_ack = 1;
1044                 spin_unlock_irqrestore (&req->rq_lock, flags);
1045         }
1046
1047         if (!OBD_FAIL_CHECK(fail_id | OBD_FAIL_ONCE)) {
1048                 if (rc == 0) {
1049                         DEBUG_REQ(D_NET, req, "sending reply");
1050                         netrc = ptlrpc_reply(req);
1051                 } else if (rc == -ENOTCONN) {
1052                         DEBUG_REQ(D_HA, req, "processing error (%d)", rc);
1053                         netrc = ptlrpc_error(req);
1054                 } else {
1055                         DEBUG_REQ(D_ERROR, req, "processing error (%d)", rc);
1056                         netrc = ptlrpc_error(req);
1057                 }
1058         } else {
1059                 obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
1060                 DEBUG_REQ(D_ERROR, req, "dropping reply");
1061                 if (req->rq_repmsg) {
1062                         OBD_FREE(req->rq_repmsg, req->rq_replen);
1063                         req->rq_repmsg = NULL;
1064                 }
1065                 init_waitqueue_head(&req->rq_reply_waitq);
1066                 netrc = 0;
1067         }
1068
1069         /* a failed send simulates the callbacks */
1070         LASSERT(netrc == 0 || req->rq_want_ack == 0);
1071         if (exp == NULL) {
1072                 LASSERT(req->rq_want_ack == 0);
1073                 return;
1074         }
1075         LASSERT(obd != NULL);
1076
1077         init_waitqueue_entry(&commit_wait, current);
1078         add_wait_queue(&obd->obd_commit_waitq, &commit_wait);
1079         rc = l_wait_event(req->rq_reply_waitq,
1080                           !req->rq_want_ack || req->rq_resent ||
1081                           req->rq_transno <= obd->obd_last_committed, &lwi);
1082         remove_wait_queue(&obd->obd_commit_waitq, &commit_wait);
1083
1084         spin_lock_irqsave (&req->rq_lock, flags);
1085         /* If we got here because the ACK callback ran, this acts as a
1086          * barrier to ensure the callback completed the wakeup. */
1087         spin_unlock_irqrestore (&req->rq_lock, flags);
1088
1089         /* If we committed the transno already, then we might wake up before
1090          * the ack arrives.  We need to stop waiting for the ack before we can
1091          * reuse this request structure.  We are guaranteed by this point that
1092          * this cannot abort the sending of the actual reply.*/
1093         ptlrpc_abort_reply(req);
1094
1095         if (req->rq_resent) {
1096                 DEBUG_REQ(D_HA, req, "resent: not cancelling locks");
1097                 return;
1098         }
1099
1100         LASSERT(rc == 0);
1101         DEBUG_REQ(D_HA, req, "cancelling locks for %s",
1102                   req->rq_want_ack ? "commit" : "ack");
1103
1104         exp->exp_outstanding_reply = NULL;
1105
1106         for (ack_lock = req->rq_ack_locks, i = 0;
1107              i < REQ_MAX_ACK_LOCKS; i++, ack_lock++) {
1108                 if (!ack_lock->mode)
1109                         continue;
1110                 ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
1111         }
1112 }
1113
1114 int target_handle_ping(struct ptlrpc_request *req)
1115 {
1116         return lustre_pack_reply(req, 0, NULL, NULL);
1117 }
1118
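/* Record a lock handle and mode in the request's ack-lock array so that
 * target_send_reply() keeps the lock referenced until the client has
 * acknowledged the reply (or the transno has committed). */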
1119 void *ldlm_put_lock_into_req(struct ptlrpc_request *req,
1120                                 struct lustre_handle *lock, int mode)
1121 {
1122         int i;
1123
1124         for (i = 0; i < REQ_MAX_ACK_LOCKS; i++) {
1125                 if (req->rq_ack_locks[i].mode)
1126                         continue;
1127                 memcpy(&req->rq_ack_locks[i].lock, lock, sizeof(*lock));
1128                 req->rq_ack_locks[i].mode = mode;
1129                 return &req->rq_ack_locks[i];
1130         }
1131         CERROR("no space for lock in struct ptlrpc_request\n");
1132         LBUG();
1133         return NULL;
1134 }
1135