Whamcloud - gitweb
- added more debug client side pipe related stuff
[fs/lustre-release.git] / lustre / ldlm / ldlm_lib.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21
22 #ifndef EXPORT_SYMTAB
23 # define EXPORT_SYMTAB
24 #endif
25 #define DEBUG_SUBSYSTEM S_LDLM
26
27 #ifdef __KERNEL__
28 # include <linux/module.h>
29 #else
30 # include <liblustre.h>
31 #endif
32 #include <linux/obd.h>
33 #include <linux/obd_ost.h> /* for LUSTRE_OSC_NAME */
34 #include <linux/lustre_mds.h> /* for LUSTRE_MDC_NAME */
35 #include <linux/lustre_mgmt.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/lustre_net.h>
38 #include <linux/lustre_sec.h>
39 #include <linux/lustre_gs.h>
40
41 /* @priority: if non-zero, move the selected to the list head
42  * @nocreate: if non-zero, only search in existed connections
43  */
44 static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
45                            int priority, int nocreate)
46 {
47         struct ptlrpc_connection *ptlrpc_conn;
48         struct obd_import_conn *imp_conn = NULL, *item;
49         int rc = 0;
50         ENTRY;
51
52         LASSERT(!(nocreate && !priority));
53
54         ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
55         if (!ptlrpc_conn) {
56                 CERROR("can't find connection %s\n", uuid->uuid);
57                 RETURN (-EINVAL);
58         }
59
60         if (!nocreate) {
61                 OBD_ALLOC(imp_conn, sizeof(*imp_conn));
62                 if (!imp_conn) {
63                         CERROR("fail to alloc memory\n");
64                         GOTO(out_put, rc = -ENOMEM);
65                 }
66         }
67
68         spin_lock(&imp->imp_lock);
69         list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
70                 if (obd_uuid_equals(uuid, &item->oic_uuid)) {
71                         if (priority) {
72                                 list_del(&item->oic_item);
73                                 list_add(&item->oic_item, &imp->imp_conn_list);
74                                 item->oic_last_attempt = 0;
75                         }
76                         CDEBUG(D_HA, "imp %p@%s: find existed conn %s%s\n",
77                                imp, imp->imp_obd->obd_name, uuid->uuid,
78                                (priority ? ", move to head." : ""));
79                         spin_unlock(&imp->imp_lock);
80                         GOTO(out_free, rc = 0);
81                 }
82         }
83         /* not found */
84         if (!nocreate) {
85                 imp_conn->oic_conn = ptlrpc_conn;
86                 imp_conn->oic_uuid = *uuid;
87                 imp_conn->oic_last_attempt = 0;
88                 if (priority)
89                         list_add(&imp_conn->oic_item, &imp->imp_conn_list);
90                 else
91                         list_add_tail(&imp_conn->oic_item, &imp->imp_conn_list);
92                 CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
93                        imp, imp->imp_obd->obd_name, uuid->uuid,
94                        (priority ? "head" : "tail"));
95         } else
96                 rc = -ENOENT;
97
98         spin_unlock(&imp->imp_lock);
99         RETURN(0);
100 out_free:
101         if (imp_conn)
102                 OBD_FREE(imp_conn, sizeof(*imp_conn));
103 out_put:
104         ptlrpc_put_connection(ptlrpc_conn);
105         RETURN(rc);
106 }
107
/* Promote the existing connection @uuid to the head of @imp's connection
 * list.  Fails with -ENOENT if @uuid is not already on the list (the
 * nocreate flag forbids adding a new entry). */
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
{
        const int priority = 1, nocreate = 1;

        return import_set_conn(imp, uuid, priority, nocreate);
}
112
/* Add connection @uuid to @imp, creating a new list entry if needed.
 * A non-zero @priority places it at the head of the candidate list. */
int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
                           int priority)
{
        const int nocreate = 0;

        return import_set_conn(imp, uuid, priority, nocreate);
}
118
119 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
120 {
121         struct obd_import_conn *imp_conn;
122         struct obd_export *dlmexp;
123         int rc = -ENOENT;
124         ENTRY;
125
126         spin_lock(&imp->imp_lock);
127         if (list_empty(&imp->imp_conn_list)) {
128                 LASSERT(!imp->imp_conn_current);
129                 LASSERT(!imp->imp_connection);
130                 GOTO(out, rc);
131         }
132
133         list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
134                 if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
135                         continue;
136                 LASSERT(imp_conn->oic_conn);
137
138                 /* is current conn? */
139                 if (imp_conn == imp->imp_conn_current) {
140                         LASSERT(imp_conn->oic_conn == imp->imp_connection);
141
142                         if (imp->imp_state != LUSTRE_IMP_CLOSED &&
143                             imp->imp_state != LUSTRE_IMP_DISCON) {
144                                 CERROR("can't remove current connection\n");
145                                 GOTO(out, rc = -EBUSY);
146                         }
147
148                         ptlrpc_put_connection(imp->imp_connection);
149                         imp->imp_connection = NULL;
150
151                         dlmexp = class_conn2export(&imp->imp_dlm_handle);
152                         if (dlmexp && dlmexp->exp_connection) {
153                                 LASSERT(dlmexp->exp_connection ==
154                                         imp_conn->oic_conn);
155                                 ptlrpc_put_connection(dlmexp->exp_connection);
156                                 dlmexp->exp_connection = NULL;
157                         }
158                 }
159
160                 list_del(&imp_conn->oic_item);
161                 ptlrpc_put_connection(imp_conn->oic_conn);
162                 OBD_FREE(imp_conn, sizeof(*imp_conn));
163                 CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
164                        imp, imp->imp_obd->obd_name, uuid->uuid);
165                 rc = 0;
166                 break;
167         }
168 out:
169         spin_unlock(&imp->imp_lock);
170         if (rc == -ENOENT)
171                 CERROR("connection %s not found\n", uuid->uuid);
172         RETURN(rc);
173 }
174
/* Common setup for client obds (OSC, MDC, MGMTCLI, GKC).
 *
 * Chooses request/reply portals and the connect opcode from the obd type
 * name, validates the target and server UUIDs from the lustre_cfg record,
 * initializes the client_obd caching/RPC state, takes an ldlm reference,
 * creates the obd_import and registers its initial connection.
 *
 * lustre_cfg buffers: 1 = target UUID (required, < 38 chars),
 * 2 = server UUID (required, < 38 chars), 3 = optional "inactive" marker
 * or a management-client name, 4 = management-client name when buffer 3
 * is "inactive".
 *
 * Returns 0 on success or a negative errno; on failure the import and
 * the ldlm reference are torn down in reverse order of acquisition.
 */
int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf)
{
        struct lustre_cfg* lcfg = buf;
        struct client_obd *cli = &obddev->u.cli;
        struct obd_import *imp;
        struct obd_uuid server_uuid;
        int rq_portal, rp_portal, connect_op;
        char *name = obddev->obd_type->typ_name;
        char *mgmt_name = NULL;
        int rc;
        ENTRY;

        /* In a more perfect world, we would hang a ptlrpc_client off of
         * obd_type and just use the values from there. */
        if (!strcmp(name, OBD_OSC_DEVICENAME)) {
                rq_portal = OST_REQUEST_PORTAL;
                rp_portal = OSC_REPLY_PORTAL;
                connect_op = OST_CONNECT;
        } else if (!strcmp(name, OBD_MDC_DEVICENAME)) {
                rq_portal = MDS_REQUEST_PORTAL;
                rp_portal = MDC_REPLY_PORTAL;
                connect_op = MDS_CONNECT;
        } else if (!strcmp(name, OBD_MGMTCLI_DEVICENAME)) {
                rq_portal = MGMT_REQUEST_PORTAL;
                rp_portal = MGMT_REPLY_PORTAL;
                connect_op = MGMT_CONNECT;
        } else if (!strcmp(name, LUSTRE_GKC_NAME)) {
                rq_portal = GKS_REQUEST_PORTAL;
                rp_portal = GKC_REPLY_PORTAL;
                connect_op = GKS_CONNECT;

        } else {
                CERROR("unknown client OBD type \"%s\", can't setup\n",
                       name);
                RETURN(-EINVAL);
        }


        /* Both UUIDs must be present and fit an obd_uuid (37 chars + NUL). */
        if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
                CERROR("requires a TARGET UUID\n");
                RETURN(-EINVAL);
        }

        if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
                CERROR("client UUID must be less than 38 characters\n");
                RETURN(-EINVAL);
        }

        if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
                CERROR("setup requires a SERVER UUID\n");
                RETURN(-EINVAL);
        }

        if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
                CERROR("target UUID must be less than 38 characters\n");
                RETURN(-EINVAL);
        }

        sema_init(&cli->cl_sem, 1);
        cli->cl_conn_count = 0;
        memcpy(server_uuid.uuid,  lustre_cfg_buf(lcfg, 2),
               min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2), 
               sizeof(server_uuid)));

        /* Dirty-cache accounting: cap the per-client dirty max at 1/8 of
         * physical memory. */
        cli->cl_dirty = 0;
        cli->cl_avail_grant = 0;
        /* FIXME: should limit this for the sum of all cl_dirty_max */
        cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
        if (cli->cl_dirty_max >> PAGE_SHIFT > num_physpages / 8)
                cli->cl_dirty_max = num_physpages << (PAGE_SHIFT - 3);
        INIT_LIST_HEAD(&cli->cl_cache_waiters);
        INIT_LIST_HEAD(&cli->cl_loi_ready_list);
        INIT_LIST_HEAD(&cli->cl_loi_write_list);
        INIT_LIST_HEAD(&cli->cl_loi_read_list);
        spin_lock_init(&cli->cl_loi_list_lock);
        cli->cl_r_in_flight = 0;
        cli->cl_w_in_flight = 0;
        spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
        spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
        spin_lock_init(&cli->cl_read_page_hist.oh_lock);
        spin_lock_init(&cli->cl_write_page_hist.oh_lock);

        /* Zero the write-gap / cache-wait statistics reported at cleanup. */
        memset(&cli->cl_last_write_time, 0,
               sizeof(cli->cl_last_write_time));
        cli->cl_write_gap_sum = 0;
        cli->cl_write_gaps = 0;
        cli->cl_write_num = 0;
        cli->cl_read_num = 0;
        cli->cl_cache_wait_num = 0;
        cli->cl_cache_wait_sum = 0;

        /* Scale RPC concurrency down on small-memory machines. */
        if (num_physpages >> (20 - PAGE_SHIFT) <= 128) { /* <= 128 MB */
                cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES / 4;
                cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT / 4;
#if 0
        } else if (num_physpages >> (20 - PAGE_SHIFT) <= 512) { /* <= 512 MB */
                cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES / 2;
                cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT / 2;
#endif
        } else {
                cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
                cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
        }

        rc = ldlm_get_ref();
        if (rc) {
                CERROR("ldlm_get_ref failed: %d\n", rc);
                GOTO(err, rc);
        }

        ptlrpc_init_client(rq_portal, rp_portal, name,
                           &obddev->obd_ldlm_client);

        imp = class_new_import();
        if (imp == NULL) 
                GOTO(err_ldlm, rc = -ENOENT);
        imp->imp_client = &obddev->obd_ldlm_client;
        imp->imp_obd = obddev;
        imp->imp_connect_op = connect_op;
        imp->imp_generation = 0;
        imp->imp_initial_recov = 1;
        INIT_LIST_HEAD(&imp->imp_pinger_chain);
        memcpy(imp->imp_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
               LUSTRE_CFG_BUFLEN(lcfg, 1));
        /* NOTE(review): a reference is dropped here although imp is still
         * used below and stored in cli->cl_import; this appears to rely on
         * class_new_import() returning extra reference(s) -- confirm
         * against class_new_import()/class_destroy_import(). */
        class_import_put(imp);

        /* Register the server UUID as the initial (highest-priority)
         * connection for this import. */
        rc = client_import_add_conn(imp, &server_uuid, 1);
        if (rc) {
                CERROR("can't add initial connection\n");
                GOTO(err_import, rc);
        }

        cli->cl_import = imp;
        cli->cl_max_mds_easize = sizeof(struct lov_mds_md);
        cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
        cli->cl_sandev = to_kdev_t(0);

        /* Optional config buffer 3: either the literal "inactive" (mark the
         * import invalid until explicitly activated; the mgmt name then
         * comes from buffer 4) or a management-client name directly. */
        if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
                if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
                        CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
                               name, obddev->obd_name,
                               imp->imp_target_uuid.uuid);
                        imp->imp_invalid = 1;

                        if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0)
                                mgmt_name = lustre_cfg_string(lcfg, 4);
                } else {
                        mgmt_name = lustre_cfg_string(lcfg, 3);
                }
        }
#if 0
        /* Disabled: registration with the management client for events. */
        if (mgmt_name != NULL) {
                /* Register with management client if we need to. */
                CDEBUG(D_HA, "%s registering with %s for events about %s\n",
                       obddev->obd_name, mgmt_name, server_uuid.uuid);

                mgmt_obd = class_name2obd(mgmt_name);
                if (!mgmt_obd) {
                        CERROR("can't find mgmtcli %s to register\n",
                               mgmt_name);
                        GOTO(err_import, rc = -ENOSYS);
                }

                register_f = (mgmtcli_register_for_events_t)symbol_get("mgmtcli_register_for_events");
                if (!register_f) {
                        CERROR("can't i_m_g mgmtcli_register_for_events\n");
                        GOTO(err_import, rc = -ENOSYS);
                }

                rc = register_f(mgmt_obd, obddev, &imp->imp_target_uuid);
                symbol_put("mgmtcli_register_for_events");

                if (!rc)
                        cli->cl_mgmtcli_obd = mgmt_obd;
        }
#endif
        RETURN(rc);

/* Error unwind: release in reverse order of acquisition. */
err_import:
        class_destroy_import(imp);
err_ldlm:
        ldlm_put_ref(0);
err:
        RETURN(rc);

}
361
362 int client_obd_cleanup(struct obd_device *obddev, int flags)
363 {
364         struct client_obd *cli = &obddev->u.cli;
365         ENTRY;
366
367         if (!cli->cl_import)
368                 RETURN(-EINVAL);
369         if (cli->cl_mgmtcli_obd) {
370                 mgmtcli_deregister_for_events_t dereg_f;
371
372                 dereg_f = (mgmtcli_deregister_for_events_t)symbol_get("mgmtcli_deregister_for_events");
373                 dereg_f(cli->cl_mgmtcli_obd, obddev);
374                 symbol_put("mgmtcli_deregister_for_events");
375         }
376
377         /* Here we try to drop the security structure after destroy import,
378          * to avoid issue of "sleep in spinlock".
379          */
380         class_import_get(cli->cl_import);
381         class_destroy_import(cli->cl_import);
382         ptlrpcs_import_drop_sec(cli->cl_import);
383         class_import_put(cli->cl_import);
384         cli->cl_import = NULL;
385
386         if (cli->cl_write_gaps) {
387                 CWARN("%s: (write num: %lu, read num: %lu): %lu write gaps: %lu "
388                       "av. (usec), %lu total (usec)\n", obddev->obd_name,
389                       cli->cl_write_num, cli->cl_read_num, cli->cl_write_gaps,
390                       cli->cl_write_gap_sum / cli->cl_write_gaps,
391                       cli->cl_write_gap_sum);
392         }
393         if (cli->cl_cache_wait_num) {
394                 CWARN("%s: cache wait num: %lu, cache wait av. %lu (usec)\n",
395                       obddev->obd_name, cli->cl_cache_wait_num,
396                       cli->cl_cache_wait_sum / cli->cl_cache_wait_num);
397         }
398
399         ldlm_put_ref(flags & OBD_OPT_FORCE);
400         RETURN(0);
401 }
402
/* Connect a client obd to its target server.
 *
 * On the first connection (cl_conn_count goes 0 -> 1) this creates the
 * client DLM namespace, attaches a security context to the import,
 * initializes import state and issues the connect RPC; subsequent calls
 * only bump the connection count.
 *
 * Serialized by cli->cl_sem.  Error handling uses the goto-into-if
 * idiom: the out_ldlm/out_disco labels live inside the failure branch
 * near the bottom, so earlier GOTOs unwind namespace and connection
 * state in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int client_connect_import(struct lustre_handle *dlm_handle,
                          struct obd_device *obd,
                          struct obd_uuid *cluuid,
                          struct obd_connect_data *conn_data,
                          unsigned long connect_flags)
{
        struct client_obd *cli = &obd->u.cli;
        struct obd_import *imp = cli->cl_import;
        struct obd_export *exp;
        int rc;
        ENTRY;

        down(&cli->cl_sem);
        rc = class_connect(dlm_handle, obd, cluuid);
        if (rc)
                GOTO(out_sem, rc);

        /* already connected: just count the extra reference */
        cli->cl_conn_count++;
        if (cli->cl_conn_count > 1)
                GOTO(out_sem, rc);
        exp = class_conn2export(dlm_handle);

        if (obd->obd_namespace != NULL)
                CERROR("already have namespace!\n");
        obd->obd_namespace = ldlm_namespace_new(obd->obd_name,
                                                LDLM_NAMESPACE_CLIENT);
        if (obd->obd_namespace == NULL)
                GOTO(out_disco, rc = -ENOMEM);

        rc = ptlrpcs_import_get_sec(imp);
        if (rc != 0)
                GOTO(out_ldlm, rc);

        imp->imp_dlm_handle = *dlm_handle;
        rc = ptlrpc_init_import(imp);
        if (rc != 0) 
                GOTO(out_ldlm, rc);

        imp->imp_connect_flags = connect_flags;
        if (conn_data)
                memcpy(&imp->imp_connect_data, conn_data, sizeof(*conn_data));

        rc = ptlrpc_connect_import(imp, NULL);
        if (rc != 0) {
                LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
                GOTO(out_ldlm, rc);
        }
        LASSERT(exp->exp_connection);
        /* keep the import alive via periodic pings */
        ptlrpc_pinger_add_import(imp);
        EXIT;

        /* failure unwind (targets of the GOTOs above); on success we only
         * drop the export reference taken by class_conn2export() */
        if (rc) {
out_ldlm:
                ldlm_namespace_free(obd->obd_namespace, 0);
                obd->obd_namespace = NULL;
out_disco:
                cli->cl_conn_count--;
                class_disconnect(exp, 0);
        } else {
                class_export_put(exp);
        }
out_sem:
        up(&cli->cl_sem);
        return rc;
}
468
469 int client_disconnect_export(struct obd_export *exp, unsigned long flags)
470 {
471         struct obd_device *obd = class_exp2obd(exp);
472         struct client_obd *cli = &obd->u.cli;
473         struct obd_import *imp = cli->cl_import;
474         int rc = 0, err;
475         ENTRY;
476
477         if (!obd) {
478                 CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
479                        exp, exp ? exp->exp_handle.h_cookie : -1);
480                 RETURN(-EINVAL);
481         }
482
483         down(&cli->cl_sem);
484         if (!cli->cl_conn_count) {
485                 CERROR("disconnecting disconnected device (%s)\n",
486                        obd->obd_name);
487                 GOTO(out_sem, rc = -EINVAL);
488         }
489
490         cli->cl_conn_count--;
491         if (cli->cl_conn_count)
492                 GOTO(out_no_disconnect, rc = 0);
493
494         /* Some non-replayable imports (MDS's OSCs) are pinged, so just
495          * delete it regardless.  (It's safe to delete an import that was
496          * never added.) */
497         (void)ptlrpc_pinger_del_import(imp);
498
499         if (obd->obd_namespace != NULL) {
500                 /* obd_no_recov == local only */
501                 ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
502                                        obd->obd_no_recov, NULL);
503                 ldlm_namespace_free(obd->obd_namespace, obd->obd_no_recov);
504                 obd->obd_namespace = NULL;
505         }
506
507         /* 
508          * Yeah, obd_no_recov also (mainly) means "forced shutdown".
509          */
510         if (obd->obd_no_recov)
511                 ptlrpc_invalidate_import(imp, 0);
512         else
513                 rc = ptlrpc_disconnect_import(imp);
514
515         EXIT;
516  out_no_disconnect:
517         err = class_disconnect(exp, 0);
518         if (!rc && err)
519                 rc = err;
520  out_sem:
521         up(&cli->cl_sem);
522         RETURN(rc);
523 }
524
525 /* --------------------------------------------------------------------------
526  * from old lib/target.c
527  * -------------------------------------------------------------------------- */
528
529 int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
530                             struct obd_uuid *cluuid, int initial_conn)
531 {
532         if (exp->exp_connection && !initial_conn) {
533                 struct lustre_handle *hdl;
534                 hdl = &exp->exp_imp_reverse->imp_remote_handle;
535                 /* Might be a re-connect after a partition. */
536                 if (!memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
537                         CERROR("%s reconnecting\n", cluuid->uuid);
538                         conn->cookie = exp->exp_handle.h_cookie;
539                         RETURN(EALREADY);
540                 } else {
541                         CERROR("%s reconnecting from %s, "
542                                "handle mismatch (ours "LPX64", theirs "
543                                LPX64")\n", cluuid->uuid,
544                                exp->exp_connection->c_remote_uuid.uuid,
545                                hdl->cookie, conn->cookie);
546                         memset(conn, 0, sizeof *conn);
547                         RETURN(-EALREADY);
548                 }
549         }
550
551         conn->cookie = exp->exp_handle.h_cookie;
552         CDEBUG(D_INFO, "existing export for UUID '%s' at %p\n",
553                cluuid->uuid, exp);
554         CDEBUG(D_IOCTL,"connect: cookie "LPX64"\n", conn->cookie);
555         RETURN(0);
556 }
557
558 static inline int ptlrpc_peer_is_local(struct ptlrpc_peer *peer)
559 {
560         ptl_process_id_t myid;
561
562         PtlGetId(peer->peer_ni->pni_ni_h, &myid);
563         return (memcmp(&peer->peer_id, &myid, sizeof(myid)) == 0);
564 }
565
566 /* To check whether the p_flavor is in deny list or not
567  * rc:
568  *      0           not found, pass
569  *      EPERM       found, refuse
570  */
571
572 static int check_deny_list(struct list_head *head, __u32 flavor)
573 {
574         deny_sec_t *p_deny_sec = NULL;
575         deny_sec_t *n_deny_sec = NULL;
576
577         list_for_each_entry_safe(p_deny_sec, n_deny_sec, head, list) {
578                 if (p_deny_sec->flavor == flavor)
579                         return -EPERM;
580         }
581         return 0;
582 }
583
584 int target_check_deny_sec(struct obd_device *target, struct ptlrpc_request *req)
585 {
586         __u32 flavor;
587         int rc = 0;
588
589         flavor = req->rq_req_secflvr;
590
591         if (!strcmp(target->obd_type->typ_name, OBD_MDS_DEVICENAME)) {
592                 spin_lock(&target->u.mds.mds_denylist_lock);
593                 rc = check_deny_list(&target->u.mds.mds_denylist, flavor);
594                 spin_unlock(&target->u.mds.mds_denylist_lock);
595         } else if (!strcmp(target->obd_type->typ_name, OBD_FILTER_DEVICENAME)) {
596                 spin_lock(&target->u.filter.fo_denylist_lock);
597                 rc = check_deny_list(&target->u.filter.fo_denylist, flavor);
598                 spin_unlock(&target->u.filter.fo_denylist_lock);
599         }
600
601         return rc;
602 }
603
604 int target_handle_connect(struct ptlrpc_request *req)
605 {
606         unsigned long connect_flags = 0, *cfp;
607         struct obd_device *target;
608         struct obd_export *export = NULL;
609         struct obd_import *revimp;
610         struct lustre_handle conn;
611         struct obd_uuid tgtuuid;
612         struct obd_uuid cluuid;
613         struct obd_uuid remote_uuid;
614         struct list_head *p;
615         struct obd_connect_data *conn_data;
616         int conn_data_size = sizeof(*conn_data);
617         char *str, *tmp;
618         int rc = 0;
619         unsigned long flags;
620         int initial_conn = 0;
621         char peer_str[PTL_NALFMT_SIZE];
622         const int offset = 1;
623         ENTRY;
624
625         OBD_RACE(OBD_FAIL_TGT_CONN_RACE); 
626
627         LASSERT_REQSWAB (req, offset + 0);
628         str = lustre_msg_string(req->rq_reqmsg, offset + 0,
629                                 sizeof(tgtuuid) - 1);
630         if (str == NULL) {
631                 CERROR("bad target UUID for connect\n");
632                 GOTO(out, rc = -EINVAL);
633         }
634
635         obd_str2uuid (&tgtuuid, str);
636         target = class_uuid2obd(&tgtuuid);
637         if (!target)
638                 target = class_name2obd(str);
639         
640         if (!target || target->obd_stopping || !target->obd_set_up) {
641                 CERROR("UUID '%s' is not available for connect from %s\n",
642                        str, req->rq_peerstr);
643                 GOTO(out, rc = -ENODEV);
644         }
645
646         /* check the secure deny list of mds/ost */
647         rc = target_check_deny_sec(target, req);
648         if (rc != 0)
649                 GOTO(out, rc);
650
651         LASSERT_REQSWAB (req, offset + 1);
652         str = lustre_msg_string(req->rq_reqmsg, offset + 1, sizeof(cluuid) - 1);
653         if (str == NULL) {
654                 CERROR("bad client UUID for connect\n");
655                 GOTO(out, rc = -EINVAL);
656         }
657
658         obd_str2uuid (&cluuid, str);
659
660         /* XXX extract a nettype and format accordingly */
661         switch (sizeof(ptl_nid_t)) {
662                 /* NB the casts only avoid compiler warnings */
663         case 8:
664                 snprintf((char *)remote_uuid.uuid, sizeof(remote_uuid),
665                          "NET_"LPX64"_UUID", (__u64)req->rq_peer.peer_id.nid);
666                 break;
667         case 4:
668                 snprintf((char *)remote_uuid.uuid, sizeof(remote_uuid),
669                          "NET_%x_UUID", (__u32)req->rq_peer.peer_id.nid);
670                 break;
671         default:
672                 LBUG();
673         }
674
675         tmp = lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof(conn));
676         if (tmp == NULL)
677                 GOTO(out, rc = -EPROTO);
678
679         memcpy(&conn, tmp, sizeof conn);
680
681         cfp = lustre_msg_buf(req->rq_reqmsg, offset + 3, sizeof(unsigned long));
682         LASSERT(cfp != NULL);
683         connect_flags = *cfp;
684
685         conn_data = lustre_swab_reqbuf(req, offset + 4, sizeof(*conn_data),
686                                        lustre_swab_connect);
687         if (!conn_data)
688                 GOTO(out, rc = -EPROTO);
689
690         rc = lustre_pack_reply(req, 1, &conn_data_size, NULL);
691         if (rc)
692                 GOTO(out, rc);
693         
694         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_INITIAL)
695                 initial_conn = 1;
696         
697         /* lctl gets a backstage, all-access pass. */
698         if (obd_uuid_equals(&cluuid, &target->obd_uuid))
699                 goto dont_check_exports;
700
701         spin_lock(&target->obd_dev_lock);
702         list_for_each(p, &target->obd_exports) {
703                 export = list_entry(p, struct obd_export, exp_obd_chain);
704                 if (obd_uuid_equals(&cluuid, &export->exp_client_uuid)) {
705                         spin_unlock(&target->obd_dev_lock);
706                         LASSERT(export->exp_obd == target);
707
708                         rc = target_handle_reconnect(&conn, export, &cluuid,
709                                                      initial_conn);
710                         break;
711                 }
712                 export = NULL;
713         }
714         /* If we found an export, we already unlocked. */
715         if (!export) {
716                 spin_unlock(&target->obd_dev_lock);
717         } else if (req->rq_export == NULL && 
718                    atomic_read(&export->exp_rpc_count) > 0) {
719                 CWARN("%s: refuse connection from %s/%s to 0x%p/%d\n",
720                       target->obd_name, cluuid.uuid,
721                       ptlrpc_peernid2str(&req->rq_peer, peer_str),
722                       export, atomic_read(&export->exp_refcount));
723                 GOTO(out, rc = -EBUSY);
724         } else if (req->rq_export != NULL &&
725                    atomic_read(&export->exp_rpc_count) > 1) {
726                 CWARN("%s: refuse reconnection from %s@%s to 0x%p/%d\n",
727                       target->obd_name, cluuid.uuid,
728                       ptlrpc_peernid2str(&req->rq_peer, peer_str),
729                       export, atomic_read(&export->exp_rpc_count));
730                 GOTO(out, rc = -EBUSY);
731         } else if (req->rq_reqmsg->conn_cnt == 1 && !initial_conn) {
732                 CERROR("%s reconnected with 1 conn_cnt; cookies not random?\n",
733                        cluuid.uuid);
734                 GOTO(out, rc = -EALREADY);
735         }
736
737         /* Tell the client if we're in recovery. */
738         /* If this is the first client, start the recovery timer */
739         CWARN("%s: connection from %s@%s/%lu %st"LPU64"\n", target->obd_name,
740               cluuid.uuid, ptlrpc_peernid2str(&req->rq_peer, peer_str), *cfp,
741               target->obd_recovering ? "recovering/" : "", conn_data->transno);
742
743         if (target->obd_recovering) {
744                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECOVERING);
745                 target_start_recovery_timer(target);
746         }
747
748 #if 0
749         /* Tell the client if we support replayable requests */
750         if (target->obd_replayable)
751                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
752 #endif
753
754         if (export == NULL) {
755                 if (target->obd_recovering) {
756                         CERROR("%s denying connection for new client %s@%s: "
757                                "%d clients in recovery for %lds\n", target->obd_name, 
758                                cluuid.uuid,
759                                ptlrpc_peernid2str(&req->rq_peer, peer_str),
760                                target->obd_recoverable_clients,
761                                (target->obd_recovery_timer.expires-jiffies)/HZ);
762                         rc = -EBUSY;
763                 } else {
764  dont_check_exports:
765                         rc = obd_connect(&conn, target, &cluuid, conn_data,
766                                          connect_flags);
767                 }
768         }
769
770         /* Return only the parts of obd_connect_data that we understand, so the
771          * client knows that we don't understand the rest. */
772         conn_data->ocd_connect_flags &= OBD_CONNECT_SUPPORTED;
773         memcpy(lustre_msg_buf(req->rq_repmsg, 0, sizeof(*conn_data)), conn_data,
774                sizeof(*conn_data));
775
776         /* Tell the client if we support replayable requests */
777         if (target->obd_replayable)
778                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
779
780         /* If all else goes well, this is our RPC return code. */
781         req->rq_status = 0;
782
783         if (rc && rc != EALREADY)
784                 GOTO(out, rc);
785
786         req->rq_repmsg->handle = conn;
787
788         /* If the client and the server are the same node, we will already
789          * have an export that really points to the client's DLM export,
790          * because we have a shared handles table.
791          *
792          * XXX this will go away when shaver stops sending the "connect" handle
793          * in the real "remote handle" field of the request --phik 24 Apr 2003
794          */
795         if (req->rq_export != NULL)
796                 class_export_put(req->rq_export);
797
798         /* ownership of this export ref transfers to the request */
799         export = req->rq_export = class_conn2export(&conn);
800         LASSERT(export != NULL);
801
802         spin_lock_irqsave(&export->exp_lock, flags);
803         if (initial_conn) {
804                 req->rq_repmsg->conn_cnt = export->exp_conn_cnt + 1;
805         } else if (export->exp_conn_cnt >= req->rq_reqmsg->conn_cnt) {
806                 CERROR("%s@%s: already connected at a higher conn_cnt: %d > %d\n",
807                        cluuid.uuid, ptlrpc_peernid2str(&req->rq_peer, peer_str),
808                        export->exp_conn_cnt, 
809                        req->rq_reqmsg->conn_cnt);
810                 spin_unlock_irqrestore(&export->exp_lock, flags);
811                 GOTO(out, rc = -EALREADY);
812         } 
813         export->exp_conn_cnt = req->rq_reqmsg->conn_cnt;
814         spin_unlock_irqrestore(&export->exp_lock, flags);
815
816         /* request from liblustre? */
817         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT)
818                 export->exp_libclient = 1;
819
820         if (!(lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_ASYNC) &&
821             ptlrpc_peer_is_local(&req->rq_peer)) {
822                 CWARN("%s: exp %p set sync\n", target->obd_name, export);
823                 export->exp_sync = 1;
824         } else {
825                 CDEBUG(D_HA, "%s: exp %p set async\n",target->obd_name,export);
826                 export->exp_sync = 0;
827         }
828
829         if (export->exp_connection != NULL)
830                 ptlrpc_put_connection(export->exp_connection);
831         export->exp_connection = ptlrpc_get_connection(&req->rq_peer,
832                                                        &remote_uuid);
833
834         if (rc == EALREADY) {
835                 /* We indicate the reconnection in a flag, not an error code. */
836                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECONNECT);
837                 GOTO(out, rc = 0);
838         }
839
840         spin_lock_bh(&target->obd_processing_task_lock);
841         if (target->obd_recovering && export->exp_connected == 0) {
842                 __u64 t = conn_data->transno;
843                 export->exp_connected = 1;
844                 if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
845                                 && t < target->obd_next_recovery_transno)
846                         target->obd_next_recovery_transno = t;
847                 target->obd_connected_clients++;
848                 if (target->obd_connected_clients == target->obd_max_recoverable_clients)
849                         wake_up(&target->obd_next_transno_waitq);
850         }
851         spin_unlock_bh(&target->obd_processing_task_lock);
852
853         memcpy(&conn, lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof(conn)),
854                sizeof(conn));
855
856         if (export->exp_imp_reverse != NULL) {
857                 /* same logic as client_obd_cleanup */
858                 class_import_get(export->exp_imp_reverse);
859                 class_destroy_import(export->exp_imp_reverse);
860                 ptlrpcs_import_drop_sec(export->exp_imp_reverse);
861                 class_import_put(export->exp_imp_reverse);
862         }
863
864         /* for the rest part, we return -ENOTCONN in case of errors
865          * in order to let client initialize connection again.
866          */
867         revimp = export->exp_imp_reverse = class_new_import();
868         if (!revimp) {
869                 CERROR("fail to alloc new reverse import.\n");
870                 GOTO(out, rc = -ENOTCONN);
871         }
872
873         revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
874         revimp->imp_client = &export->exp_obd->obd_ldlm_client;
875         revimp->imp_remote_handle = conn;
876         revimp->imp_obd = target;
877         revimp->imp_dlm_fake = 1;
878         revimp->imp_state = LUSTRE_IMP_FULL;
879
880         rc = ptlrpcs_import_get_sec(revimp);
881         if (rc) {
882                 CERROR("reverse import can not get sec: %d\n", rc);
883                 class_destroy_import(revimp);
884                 export->exp_imp_reverse = NULL;
885                 GOTO(out, rc = -ENOTCONN);
886         }
887
888         class_import_put(revimp);
889
890         rc = obd_connect_post(export, initial_conn, connect_flags);
891 out:
892         if (rc)
893                 req->rq_status = rc;
894         RETURN(rc);
895 }
896
897 int target_handle_disconnect(struct ptlrpc_request *req)
898 {
899         struct obd_export *exp;
900         int rc;
901         ENTRY;
902
903         rc = lustre_pack_reply(req, 0, NULL, NULL);
904         if (rc)
905                 RETURN(rc);
906
907         /* keep the rq_export around so we can send the reply */
908         exp = class_export_get(req->rq_export);
909         req->rq_status = obd_disconnect(exp, 0);
910         RETURN(0);
911 }
912
/* Final teardown of an export: tear down the reverse import (security
 * context first, then the import itself) and cancel any DLM locks still
 * granted to this export. */
void target_destroy_export(struct obd_export *exp)
{
        /* exports created from last_rcvd data, and "fake"
           exports created by lctl don't have an import */
        if (exp->exp_imp_reverse != NULL) {
                ptlrpcs_import_drop_sec(exp->exp_imp_reverse);
                class_destroy_import(exp->exp_imp_reverse);
        }

        /* We cancel locks at disconnect time, but this will catch any locks
         * granted in a race with recovery-induced disconnect. */
        if (exp->exp_obd->obd_namespace != NULL)
                ldlm_cancel_locks_for_export(exp);
}
927
928 /*
929  * Recovery functions
930  */
931
932 struct ptlrpc_request *
933 ptlrpc_clone_req( struct ptlrpc_request *orig_req) 
934 {
935         struct ptlrpc_request *copy_req;
936         struct lustre_msg *copy_reqmsg;
937
938         OBD_ALLOC(copy_req, sizeof *copy_req);
939         if (!copy_req)
940                 return NULL;
941         OBD_ALLOC(copy_reqmsg, orig_req->rq_reqlen);
942         if (!copy_reqmsg){
943                 OBD_FREE(copy_req, sizeof *copy_req);
944                 return NULL;
945         }
946
947         memcpy(copy_req, orig_req, sizeof *copy_req);
948         memcpy(copy_reqmsg, orig_req->rq_reqmsg, orig_req->rq_reqlen);
949         /* the copied req takes over the reply state and security data */
950         orig_req->rq_reply_state = NULL;
951         orig_req->rq_svcsec_data = NULL;
952
953         copy_req->rq_reqmsg = copy_reqmsg;
954         class_export_get(copy_req->rq_export);
955         INIT_LIST_HEAD(&copy_req->rq_list);
956
957         return copy_req;
958 }
959
960 void ptlrpc_free_clone( struct ptlrpc_request *req) 
961 {
962         if (req->rq_svcsec)
963                 svcsec_cleanup_req(req);
964
965         class_export_put(req->rq_export);
966         list_del(&req->rq_list);
967         OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
968         OBD_FREE(req, sizeof *req);
969 }
970
/* Release a saved (cloned) request that has already been removed from
 * any queue: clean up its security context, drop the export reference
 * and free its buffers.  Like ptlrpc_free_clone() minus the list_del. */
static void target_release_saved_req(struct ptlrpc_request *req)
{
        if (req->rq_svcsec)
                svcsec_cleanup_req(req);

        class_export_put(req->rq_export);
        OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
        OBD_FREE(req, sizeof *req);
}
980
981 static void target_finish_recovery(struct obd_device *obd)
982 {
983         int rc;
984
985         ldlm_reprocess_all_ns(obd->obd_namespace);
986
987         /* when recovery finished, cleanup orphans on mds and ost */
988         if (OBT(obd) && OBP(obd, postrecov)) {
989                 rc = OBP(obd, postrecov)(obd);
990                 if (rc >= 0)
991                         CWARN("%s: all clients recovered, %d MDS "
992                               "orphans deleted\n", obd->obd_name, rc);
993                 else
994                         CERROR("postrecov failed %d\n", rc);
995         }
996
997         obd->obd_recovery_end = LTIME_S(CURRENT_TIME);
998         return;
999 }
1000
1001 static void abort_req_replay_queue(struct obd_device *obd)
1002 {
1003         struct ptlrpc_request *req;
1004         struct list_head *tmp, *n;
1005         int rc;
1006
1007         list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
1008                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1009                 list_del(&req->rq_list);
1010                 DEBUG_REQ(D_ERROR, req, "aborted:");
1011                 req->rq_status = -ENOTCONN;
1012                 req->rq_type = PTL_RPC_MSG_ERR;
1013                 rc = lustre_pack_reply(req, 0, NULL, NULL);
1014                 if (rc == 0) {
1015                         ptlrpc_reply(req);
1016                 } else {
1017                         DEBUG_REQ(D_ERROR, req,
1018                                   "packing failed for abort-reply; skipping");
1019                 }
1020                 target_release_saved_req(req);
1021         }
1022 }
1023
1024 static void abort_lock_replay_queue(struct obd_device *obd)
1025 {
1026         struct ptlrpc_request *req;
1027         struct list_head *tmp, *n;
1028         int rc;
1029
1030         list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
1031                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1032                 list_del(&req->rq_list);
1033                 DEBUG_REQ(D_ERROR, req, "aborted:");
1034                 req->rq_status = -ENOTCONN;
1035                 req->rq_type = PTL_RPC_MSG_ERR;
1036                 rc = lustre_pack_reply(req, 0, NULL, NULL);
1037                 if (rc == 0) {
1038                         ptlrpc_reply(req);
1039                 } else {
1040                         DEBUG_REQ(D_ERROR, req,
1041                                   "packing failed for abort-reply; skipping");
1042                 }
1043                 target_release_saved_req(req);
1044         }
1045 }
1046
1047 /* Called from a cleanup function if the device is being cleaned up
1048    forcefully.  The exports should all have been disconnected already,
1049    the only thing left to do is
1050      - clear the recovery flags
1051      - cancel the timer
1052      - free queued requests and replies, but don't send replies
1053    Because the obd_stopping flag is set, no new requests should be received.
1054
1055 */
1056 void target_cleanup_recovery(struct obd_device *obd)
1057 {
1058         struct list_head *tmp, *n;
1059         struct ptlrpc_request *req;
1060
1061         spin_lock_bh(&obd->obd_processing_task_lock);
1062         if (!obd->obd_recovering) {
1063                 spin_unlock_bh(&obd->obd_processing_task_lock);
1064                 EXIT;
1065                 return;
1066         }
1067         obd->obd_recovering = obd->obd_abort_recovery = 0;
1068         target_cancel_recovery_timer(obd);
1069         spin_unlock_bh(&obd->obd_processing_task_lock);
1070
1071         list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
1072                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1073                 list_del(&req->rq_list);
1074                 LASSERT (req->rq_reply_state == 0);
1075                 target_release_saved_req(req);
1076         }
1077         list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
1078                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1079                 list_del(&req->rq_list);
1080                 LASSERT (req->rq_reply_state == 0);
1081                 target_release_saved_req(req);
1082         }
1083         list_for_each_safe(tmp, n, &obd->obd_final_req_queue) {
1084                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1085                 list_del(&req->rq_list);
1086                 LASSERT (req->rq_reply_state == 0);
1087                 target_release_saved_req(req);
1088         }
1089 }
1090
#if 0
/* NOTE(review): dead code, compiled out.  Presumably superseded by the
 * abort handling inside target_recovery_thread() -- confirm before
 * removing entirely. */
static void target_abort_recovery(void *data)
{
        struct obd_device *obd = data;

        LASSERT(!obd->obd_recovering);

        class_disconnect_stale_exports(obd, 0);

        CERROR("%s: recovery period over; disconnecting unfinished clients.\n",
               obd->obd_name);

        abort_recovery_queue(obd);
        target_finish_recovery(obd);
        ptlrpc_run_recovery_over_upcall(obd);
}
#endif
1108
1109 static void target_recovery_expired(unsigned long castmeharder)
1110 {
1111         struct obd_device *obd = (struct obd_device *)castmeharder;
1112         spin_lock_bh(&obd->obd_processing_task_lock);
1113         if (obd->obd_recovering)
1114                 obd->obd_abort_recovery = 1;
1115
1116         wake_up(&obd->obd_next_transno_waitq);
1117         spin_unlock_bh(&obd->obd_processing_task_lock);
1118 }
1119
1120
/* Stop the recovery timer without firing it.
 * obd_processing_task_lock should be held by the caller. */
void target_cancel_recovery_timer(struct obd_device *obd)
{
        CDEBUG(D_HA, "%s: cancel recovery timer\n", obd->obd_name);
        del_timer(&obd->obd_recovery_timer);
}
1127
1128 #ifdef __KERNEL__
1129 static void reset_recovery_timer(struct obd_device *obd)
1130 {
1131         spin_lock_bh(&obd->obd_processing_task_lock);
1132         if (!obd->obd_recovering) {
1133                 spin_unlock_bh(&obd->obd_processing_task_lock);
1134                 return;
1135         }                
1136         CDEBUG(D_HA, "timer will expire in %u seconds\n",
1137                OBD_RECOVERY_TIMEOUT / HZ);
1138         mod_timer(&obd->obd_recovery_timer, jiffies + OBD_RECOVERY_TIMEOUT);
1139         spin_unlock_bh(&obd->obd_processing_task_lock);
1140 }
1141 #endif
1142
/* Only start it the first time called: arm the recovery timer with
 * target_recovery_expired() as its handler.  No-op when recovery is
 * not in progress or the timer is already pending; subsequent
 * extensions go through reset_recovery_timer(). */
void target_start_recovery_timer(struct obd_device *obd)
{
        spin_lock_bh(&obd->obd_processing_task_lock);
        if (!obd->obd_recovering || timer_pending(&obd->obd_recovery_timer)) {
                spin_unlock_bh(&obd->obd_processing_task_lock);
                return;
        }
        CWARN("%s: starting recovery timer (%us)\n", obd->obd_name,
               OBD_RECOVERY_TIMEOUT / HZ);
        obd->obd_recovery_timer.function = target_recovery_expired;
        obd->obd_recovery_timer.data = (unsigned long)obd;
        mod_timer(&obd->obd_recovery_timer, jiffies + OBD_RECOVERY_TIMEOUT);
        spin_unlock_bh(&obd->obd_processing_task_lock);
}
1158
1159 #ifdef __KERNEL__
1160 static int check_for_next_transno(struct obd_device *obd)
1161 {
1162         struct ptlrpc_request *req = NULL;
1163         int wake_up = 0, connected, completed, queue_len, max;
1164         __u64 next_transno, req_transno;
1165
1166         spin_lock_bh(&obd->obd_processing_task_lock);
1167         if (!list_empty(&obd->obd_req_replay_queue)) {
1168                 req = list_entry(obd->obd_req_replay_queue.next,
1169                                  struct ptlrpc_request, rq_list);
1170                 req_transno = req->rq_reqmsg->transno;
1171         } else {
1172                 req_transno = 0;
1173         }
1174
1175         max = obd->obd_max_recoverable_clients;
1176         connected = obd->obd_connected_clients;
1177         completed = max - obd->obd_recoverable_clients;
1178         queue_len = obd->obd_requests_queued_for_recovery;
1179         next_transno = obd->obd_next_recovery_transno;
1180
1181         CDEBUG(D_HA,"max: %d, connected: %d, completed: %d, queue_len: %d, "
1182                "req_transno: "LPU64", next_transno: "LPU64"\n",
1183                max, connected, completed, queue_len, req_transno, next_transno);
1184         if (obd->obd_abort_recovery) {
1185                 CDEBUG(D_HA, "waking for aborted recovery\n");
1186                 wake_up = 1;
1187         } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
1188                 CDEBUG(D_HA, "waking for completed recovery\n");
1189                 wake_up = 1;
1190         } else if (req_transno == next_transno) {
1191                 CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
1192                 wake_up = 1;
1193         } else if (queue_len + completed == max) {
1194                 LASSERT(req->rq_reqmsg->transno >= next_transno);
1195                 CDEBUG(req_transno > obd->obd_last_committed ? D_ERROR : D_HA,
1196                        "waking for skipped transno (skip: "LPD64
1197                        ", ql: %d, comp: %d, conn: %d, next: "LPD64")\n",
1198                        next_transno, queue_len, completed, max, req_transno);
1199                 obd->obd_next_recovery_transno = req_transno;
1200                 wake_up = 1;
1201         } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
1202                 /* some clients haven't connected in time, but we can try
1203                  * to replay requests that demand on already committed ones
1204                  * also, we can replay first non-committed transation */
1205                 LASSERT(req_transno != 0);
1206                 if (req_transno == obd->obd_last_committed + 1) {
1207                         obd->obd_next_recovery_transno = req_transno;
1208                 } else if (req_transno > obd->obd_last_committed) {
1209                         /* can't continue recovery: have no needed transno */
1210                         obd->obd_abort_recovery = 1;
1211                         CDEBUG(D_ERROR, "abort due to missed clients. max: %d, "
1212                                "connected: %d, completed: %d, queue_len: %d, "
1213                                "req_transno: "LPU64", next_transno: "LPU64"\n",
1214                                max, connected, completed, queue_len,
1215                                req_transno, next_transno);
1216                 }
1217                 wake_up = 1;
1218         }
1219         spin_unlock_bh(&obd->obd_processing_task_lock);
1220         
1221         return wake_up;
1222 }
1223
1224 static struct ptlrpc_request *
1225 target_next_replay_req(struct obd_device *obd)
1226 {
1227         struct l_wait_info lwi = { 0 };
1228         struct ptlrpc_request *req;
1229
1230         CDEBUG(D_HA, "Waiting for transno "LPD64"\n",
1231                obd->obd_next_recovery_transno);
1232         l_wait_event(obd->obd_next_transno_waitq,
1233                      check_for_next_transno(obd), &lwi);
1234         
1235         spin_lock_bh(&obd->obd_processing_task_lock);
1236         if (obd->obd_abort_recovery) {
1237                 req = NULL;
1238         } else if (!list_empty(&obd->obd_req_replay_queue)) {
1239                 req = list_entry(obd->obd_req_replay_queue.next,
1240                                  struct ptlrpc_request, rq_list);
1241                 list_del_init(&req->rq_list);
1242                 obd->obd_requests_queued_for_recovery--;
1243         } else {
1244                 req = NULL;
1245         }
1246         spin_unlock_bh(&obd->obd_processing_task_lock);
1247         return req;
1248 }
1249
1250 static int check_for_next_lock(struct obd_device *obd)
1251 {
1252         struct ptlrpc_request *req = NULL;
1253         int wake_up = 0;
1254
1255         spin_lock_bh(&obd->obd_processing_task_lock);
1256         if (!list_empty(&obd->obd_lock_replay_queue)) {
1257                 req = list_entry(obd->obd_lock_replay_queue.next,
1258                                  struct ptlrpc_request, rq_list);
1259                 CDEBUG(D_HA, "waking for next lock\n");
1260                 wake_up = 1;
1261         } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
1262                 CDEBUG(D_HA, "waking for completed lock replay\n");
1263                 wake_up = 1;
1264         } else if (obd->obd_abort_recovery) {
1265                 CDEBUG(D_HA, "waking for aborted recovery\n");
1266                 wake_up = 1;
1267         }
1268         spin_unlock_bh(&obd->obd_processing_task_lock);
1269         
1270         return wake_up;
1271 }
1272
1273 static struct ptlrpc_request *
1274 target_next_replay_lock(struct obd_device *obd)
1275 {
1276         struct l_wait_info lwi = { 0 };
1277         struct ptlrpc_request *req;
1278
1279         CDEBUG(D_HA, "Waiting for lock\n");
1280         l_wait_event(obd->obd_next_transno_waitq,
1281                      check_for_next_lock(obd), &lwi);
1282         
1283         spin_lock_bh(&obd->obd_processing_task_lock);
1284         if (obd->obd_abort_recovery) {
1285                 req = NULL;
1286         } else if (!list_empty(&obd->obd_lock_replay_queue)) {
1287                 req = list_entry(obd->obd_lock_replay_queue.next,
1288                                  struct ptlrpc_request, rq_list);
1289                 list_del_init(&req->rq_list);
1290         } else {
1291                 req = NULL;
1292         }
1293         spin_unlock_bh(&obd->obd_processing_task_lock);
1294         return req;
1295 }
1296
1297 static struct ptlrpc_request *
1298 target_next_final_ping(struct obd_device *obd)
1299 {
1300         struct ptlrpc_request *req;
1301
1302         spin_lock_bh(&obd->obd_processing_task_lock);
1303         if (!list_empty(&obd->obd_final_req_queue)) {
1304                 req = list_entry(obd->obd_final_req_queue.next,
1305                                  struct ptlrpc_request, rq_list);
1306                 list_del_init(&req->rq_list);
1307         } else {
1308                 req = NULL;
1309         }
1310         spin_unlock_bh(&obd->obd_processing_task_lock);
1311         return req;
1312 }
1313
1314 static int req_replay_done(struct obd_export *exp)
1315 {
1316         if (exp->exp_req_replay_needed)
1317                 return 0;
1318         return 1;
1319 }
1320
1321 static int lock_replay_done(struct obd_export *exp)
1322 {
1323         if (exp->exp_lock_replay_needed)
1324                 return 0;
1325         return 1;
1326 }
1327
1328 static int connect_done(struct obd_export *exp)
1329 {
1330         if (exp->exp_connected)
1331                 return 1;
1332         return 0;
1333 }
1334
1335 static int check_for_clients(struct obd_device *obd)
1336 {
1337         if (obd->obd_abort_recovery)
1338                 return 1;
1339         LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
1340         if (obd->obd_connected_clients == obd->obd_max_recoverable_clients)
1341                 return 1;
1342         return 0;
1343 }
1344
/* Main recovery thread body.  Runs with all signals blocked.  Waits for
 * clients to reconnect, then drives the three recovery stages in order:
 *   1. request replay (in transno order),
 *   2. lock replay,
 *   3. replying to final recovery-completion pings,
 * evicting clients that miss a stage deadline, and finally calls
 * target_finish_recovery().  Each replayed request is dispatched via
 * trd->trd_recovery_handler.  Always returns 0. */
static int target_recovery_thread(void *arg)
{
        struct obd_device *obd = arg;
        struct ptlrpc_request *req;
        struct target_recovery_data *trd = &obd->obd_recovery_data;
        char peer_str[PTL_NALFMT_SIZE];
        struct l_wait_info lwi = { 0 };
        unsigned long delta;
        unsigned long flags;
        ENTRY;

        kportal_daemonize("tgt-recov");

        /* block all signals for the lifetime of the thread */
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        CERROR("%s: started recovery thread pid %d\n", obd->obd_name, 
               current->pid);
        trd->trd_processing_task = current->pid;

        obd->obd_recovering = 1;
        complete(&trd->trd_starting);

        /* first of all, we have to know the first transno to replay */
        obd->obd_abort_recovery = 0;
        l_wait_event(obd->obd_next_transno_waitq,
                     check_for_clients(obd), &lwi);
        
        spin_lock_bh(&obd->obd_processing_task_lock);
        target_cancel_recovery_timer(obd);
        spin_unlock_bh(&obd->obd_processing_task_lock);

        /* If some clients haven't connected in time, evict them */
        if (obd->obd_abort_recovery) {
                int stale;
                CDEBUG(D_ERROR, "few clients haven't connect in time (%d/%d),"
                       "evict them ...\n", obd->obd_connected_clients,
                       obd->obd_max_recoverable_clients);
                obd->obd_abort_recovery = 0;
                stale = class_disconnect_stale_exports(obd, connect_done, 0);
                atomic_sub(stale, &obd->obd_req_replay_clients);
                atomic_sub(stale, &obd->obd_lock_replay_clients);
        }

        /* next stage: replay requests */
        delta = jiffies;
        CDEBUG(D_ERROR, "1: request replay stage - %d clients from t"LPU64"\n",
              atomic_read(&obd->obd_req_replay_clients),
              obd->obd_next_recovery_transno);
        while ((req = target_next_replay_req(obd))) {
                LASSERT(trd->trd_processing_task == current->pid);
                DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s", 
                          req->rq_reqmsg->transno, 
                          ptlrpc_peernid2str(&req->rq_peer, peer_str));
                (void)trd->trd_recovery_handler(req);
                obd->obd_replayed_requests++;
                /* each replayed request buys the clients more time */
                reset_recovery_timer(obd);
                /* bug 1580: decide how to properly sync() in recovery*/
                //mds_fsync_super(mds->mds_sb);
                ptlrpc_free_clone(req);
                spin_lock_bh(&obd->obd_processing_task_lock);
                obd->obd_next_recovery_transno++;
                spin_unlock_bh(&obd->obd_processing_task_lock);
        }

        spin_lock_bh(&obd->obd_processing_task_lock);
        target_cancel_recovery_timer(obd);
        spin_unlock_bh(&obd->obd_processing_task_lock);

        /* If some clients haven't replayed requests in time, evict them */
        if (obd->obd_abort_recovery) {
                int stale;
                CDEBUG(D_ERROR, "req replay timed out, aborting ...\n");
                obd->obd_abort_recovery = 0;
                stale = class_disconnect_stale_exports(obd, req_replay_done, 0);
                atomic_sub(stale, &obd->obd_lock_replay_clients);
                abort_req_replay_queue(obd);
                /* XXX for debuggin tests 11 and 17 */
                /* NOTE(review): unconditional LBUG() here crashes the node
                 * on replay timeout -- looks like debug scaffolding per the
                 * XXX above; confirm before this ships. */
                LBUG();
        }

        /* The second stage: replay locks */
        CDEBUG(D_ERROR, "2: lock replay stage - %d clients\n",
              atomic_read(&obd->obd_lock_replay_clients));
        while ((req = target_next_replay_lock(obd))) {
                LASSERT(trd->trd_processing_task == current->pid);
                DEBUG_REQ(D_HA, req, "processing lock from %s: ", 
                          ptlrpc_peernid2str(&req->rq_peer, peer_str));
                (void)trd->trd_recovery_handler(req);
                reset_recovery_timer(obd);
                ptlrpc_free_clone(req);
                obd->obd_replayed_locks++;
        }
        
        spin_lock_bh(&obd->obd_processing_task_lock);
        target_cancel_recovery_timer(obd);
        spin_unlock_bh(&obd->obd_processing_task_lock);

        /* If some clients haven't replayed requests in time, evict them */
        if (obd->obd_abort_recovery) {
                int stale;
                CERROR("lock replay timed out, aborting ...\n");
                obd->obd_abort_recovery = 0;
                stale = class_disconnect_stale_exports(obd, lock_replay_done, 0);
                abort_lock_replay_queue(obd);
        }

        /* We drop recoverying flag to forward all new requests
         * to regular mds_handle() since now */
        spin_lock_bh(&obd->obd_processing_task_lock);
        obd->obd_recovering = 0;
        spin_unlock_bh(&obd->obd_processing_task_lock);

        /* The third stage: reply on final pings */
        CDEBUG(D_ERROR, "3: final stage - process recovery completion pings\n");
        while ((req = target_next_final_ping(obd))) {
                LASSERT(trd->trd_processing_task == current->pid);
                DEBUG_REQ(D_HA, req, "processing final ping from %s: ", 
                          ptlrpc_peernid2str(&req->rq_peer, peer_str));
                (void)trd->trd_recovery_handler(req);
                ptlrpc_free_clone(req);
        }
       
        delta = (jiffies - delta) / HZ;
        CDEBUG(D_ERROR,"4: recovery completed in %lus - %d/%d reqs/locks\n",
              delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
        if (delta > obd_timeout * 2) {
                CWARN("too long recovery - read logs\n");
                portals_debug_dumplog();
        }
        target_finish_recovery(obd);

        trd->trd_processing_task = 0;
        complete(&trd->trd_finishing);
        return 0;
}
1483
1484 int target_start_recovery_thread(struct obd_device *obd, svc_handler_t handler)
1485 {
1486         int rc = 0;
1487         struct target_recovery_data *trd = &obd->obd_recovery_data;
1488
1489         memset(trd, 0, sizeof(*trd));
1490         init_completion(&trd->trd_starting);
1491         init_completion(&trd->trd_finishing);
1492         trd->trd_recovery_handler = handler;
1493
1494         if (kernel_thread(target_recovery_thread, obd, 0) > 0) {
1495                 wait_for_completion(&trd->trd_starting);
1496                 LASSERT(obd->obd_recovering != 0);
1497         } else
1498                 rc = -ECHILD;
1499
1500         return rc;
1501 }
1502
1503 void target_stop_recovery_thread(struct obd_device *obd)
1504 {
1505         spin_lock_bh(&obd->obd_processing_task_lock);
1506         if (obd->obd_recovery_data.trd_processing_task > 0) {
1507                 struct target_recovery_data *trd = &obd->obd_recovery_data;
1508                 CERROR("%s: aborting recovery\n", obd->obd_name);
1509                 obd->obd_abort_recovery = 1;
1510                 wake_up(&obd->obd_next_transno_waitq);
1511                 spin_unlock_bh(&obd->obd_processing_task_lock);
1512                 wait_for_completion(&trd->trd_finishing);
1513         } else {
1514                 spin_unlock_bh(&obd->obd_processing_task_lock);
1515         }
1516 }
1517 #endif
1518
/* Inspect the replay-progress flags carried on @req and update the
 * obd's recovery book-keeping: MSG_REQ_REPLAY_DONE marks the export as
 * finished with request replay, MSG_LOCK_REPLAY_DONE as finished with
 * lock replay; either transition wakes the recovery thread.  Always
 * returns 0. */
int target_process_req_flags(struct obd_device *obd, struct ptlrpc_request *req)
{
        struct obd_export *exp = req->rq_export;
        LASSERT(exp != NULL);
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
                /* client declares he's ready to replay locks */
                spin_lock_bh(&obd->obd_processing_task_lock);
                if (exp->exp_req_replay_needed) {
                        LASSERT(atomic_read(&obd->obd_req_replay_clients) > 0);
                        exp->exp_req_replay_needed = 0;
                        atomic_dec(&obd->obd_req_replay_clients);
                        obd->obd_recoverable_clients--;
                        if (atomic_read(&obd->obd_req_replay_clients) == 0)
                                CDEBUG(D_HA, "all clients have replayed reqs\n");
                        /* request replay may now be able to advance */
                        wake_up(&obd->obd_next_transno_waitq);
                }
                spin_unlock_bh(&obd->obd_processing_task_lock);
        }
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
                /* client declares he's ready to complete recovery
                 * so, we put the request on the final queue */
                spin_lock_bh(&obd->obd_processing_task_lock);
                if (exp->exp_lock_replay_needed) {
                        LASSERT(atomic_read(&obd->obd_lock_replay_clients) > 0);
                        exp->exp_lock_replay_needed = 0;
                        atomic_dec(&obd->obd_lock_replay_clients);
                        if (atomic_read(&obd->obd_lock_replay_clients) == 0)
                                CDEBUG(D_HA, "all clients have replayed locks\n");
                        wake_up(&obd->obd_next_transno_waitq);
                }
                spin_unlock_bh(&obd->obd_processing_task_lock);
        }

        return 0;
}
1554
/* Decide whether an incoming request must be queued for ordered replay
 * during recovery, and if so, clone it onto the appropriate queue.
 *
 * Returns:
 *   1        - do not queue; caller should process the request now
 *   0        - request was queued (or deliberately dropped as a duplicate)
 *   -ENOMEM  - cloning the request failed
 */
int target_queue_recovery_request(struct ptlrpc_request *req,
                                  struct obd_device *obd)
{
        struct list_head *tmp;
        int inserted = 0;
        __u64 transno = req->rq_reqmsg->transno;

        if (obd->obd_recovery_data.trd_processing_task == current->pid) {
                /* The recovery thread itself is re-issuing this request;
                 * processing the queue right now, don't re-add. */
                return 1;
        }

        /* Account for any REQ/LOCK_REPLAY_DONE progress flags first. */
        target_process_req_flags(obd, req);

        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
                /* client declares he's ready to complete recovery
                 * so, we put the request on the final queue */
                req = ptlrpc_clone_req(req);
                if (req == NULL)
                        return -ENOMEM;
                DEBUG_REQ(D_HA, req, "queue final req");
                spin_lock_bh(&obd->obd_processing_task_lock);
                list_add_tail(&req->rq_list, &obd->obd_final_req_queue);
                spin_unlock_bh(&obd->obd_processing_task_lock);
                return 0;
        }
        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
                /* client declares he's ready to replay locks */
                req = ptlrpc_clone_req(req);
                if (req == NULL)
                        return -ENOMEM;
                DEBUG_REQ(D_HA, req, "queue lock replay req");
                spin_lock_bh(&obd->obd_processing_task_lock);
                list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
                spin_unlock_bh(&obd->obd_processing_task_lock);
                wake_up(&obd->obd_next_transno_waitq);
                return 0;
        }


        /* CAVEAT EMPTOR: The incoming request message has been swabbed
         * (i.e. buflens etc are in my own byte order), but type-dependent
         * buffers (eg mds_body, ost_body etc) have NOT been swabbed. */

        if (!transno) {
                /* No transno: nothing to replay in order; process now. */
                INIT_LIST_HEAD(&req->rq_list);
                DEBUG_REQ(D_HA, req, "not queueing");
                return 1;
        }


        /* If we're processing the queue, we don't want to queue this
         * message.
         *
         * Also, if this request has a transno less than the one we're waiting
         * for, we should process it now.  It could (and currently always will)
         * be an open request for a descriptor that was opened some time ago.
         *
         * Also, a resent, replayed request that has already been
         * handled will pass through here and be processed immediately.
         */
        spin_lock_bh(&obd->obd_processing_task_lock);
        if (transno < obd->obd_next_recovery_transno && check_for_clients(obd)) {
                /* Processing the queue right now, don't re-add. */
                LASSERT(list_empty(&req->rq_list));
                spin_unlock_bh(&obd->obd_processing_task_lock);
                return 1;
        }
        spin_unlock_bh(&obd->obd_processing_task_lock);
        /* NOTE(review): the lock is dropped here and re-taken below for the
         * insert, so obd_next_recovery_transno may advance in between —
         * presumably benign since the queue is re-scanned by the recovery
         * thread, but worth confirming. */

        /* A resent, replayed request that is still on the queue; just drop it.
           The queued request will handle this. */
        if ((lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY))
            == (MSG_RESENT | MSG_REPLAY)) {
                DEBUG_REQ(D_ERROR, req, "dropping resent queued req");
                return 0;
        }

        /* Clone, because the original request belongs to the service and is
         * recycled when the handler returns. */
        req = ptlrpc_clone_req(req);
        if (req == NULL)
                return -ENOMEM;

        spin_lock_bh(&obd->obd_processing_task_lock);

        /* Insert in ascending transno order.  XXX O(n^2) */
        list_for_each(tmp, &obd->obd_req_replay_queue) {
                struct ptlrpc_request *reqiter =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                if (reqiter->rq_reqmsg->transno > transno) {
                        list_add_tail(&req->rq_list, &reqiter->rq_list);
                        inserted = 1;
                        break;
                }
        }

        if (!inserted)
                list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);

        obd->obd_requests_queued_for_recovery++;
        wake_up(&obd->obd_next_transno_waitq);
        spin_unlock_bh(&obd->obd_processing_task_lock);
        return 0;
}
1659
1660 struct obd_device * target_req2obd(struct ptlrpc_request *req)
1661 {
1662         return req->rq_export->exp_obd;
1663 }
1664
1665 int
1666 target_send_reply_msg (struct ptlrpc_request *req, int rc, int fail_id)
1667 {
1668         if (OBD_FAIL_CHECK(fail_id | OBD_FAIL_ONCE)) {
1669                 obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
1670                 DEBUG_REQ(D_ERROR, req, "dropping reply");
1671                 /* NB this does _not_ send with ACK disabled, to simulate
1672                  * sending OK, but timing out for the ACK */
1673                 if (req->rq_reply_state != NULL) {
1674                         if (!req->rq_reply_state->rs_difficult) {
1675                                 lustre_free_reply_state (req->rq_reply_state);
1676                                 req->rq_reply_state = NULL;
1677                         } else {
1678                                 struct ptlrpc_service *svc =
1679                                         req->rq_rqbd->rqbd_srv_ni->sni_service;
1680                                 atomic_inc(&svc->srv_outstanding_replies);
1681                         }
1682                 }
1683                 return (-ECOMM);
1684         }
1685
1686         if (rc) {
1687                 req->rq_status = rc;
1688                 return (ptlrpc_error(req));
1689         } else {
1690                 DEBUG_REQ(D_NET, req, "sending reply");
1691         }
1692         
1693         return (ptlrpc_send_reply(req, 1));
1694 }
1695
/* Send the reply for @req, tracking "difficult" replies (those with
 * rs_difficult set) until they are both committed to disk and ACKed by
 * the client.
 *
 * Easy replies are just sent via target_send_reply_msg().  Difficult
 * replies are linked onto the obd's uncommitted-replies list (if not yet
 * committed) and the export's outstanding-replies list before sending,
 * then either handed to the service's reply queue for finalisation or
 * parked on sni_active_replies awaiting commit/ACK notification. */
void 
target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
{
        int                        netrc;
        unsigned long              flags;
        struct ptlrpc_reply_state *rs;
        struct obd_device         *obd;
        struct obd_export         *exp;
        struct ptlrpc_srv_ni      *sni;
        struct ptlrpc_service     *svc;

        sni = req->rq_rqbd->rqbd_srv_ni;
        svc = sni->sni_service;
        
        rs = req->rq_reply_state;
        if (rs == NULL || !rs->rs_difficult) {
                /* The easy case; no notifiers and reply_out_callback()
                 * cleans up (i.e. we can't look inside rs after a
                 * successful send) */
                netrc = target_send_reply_msg (req, rc, fail_id);

                LASSERT (netrc == 0 || req->rq_reply_state == NULL);
                return;
        }

        /* must be an export if locks saved */
        LASSERT (req->rq_export != NULL);
        /* req/reply consistent */
        LASSERT (rs->rs_srv_ni == sni);

        /* "fresh" reply */
        LASSERT (!rs->rs_scheduled);
        LASSERT (!rs->rs_scheduled_ever);
        LASSERT (!rs->rs_handled);
        LASSERT (!rs->rs_on_net);
        LASSERT (rs->rs_export == NULL);
        LASSERT (list_empty(&rs->rs_obd_list));
        LASSERT (list_empty(&rs->rs_exp_list));

        /* The export reference is dropped when the reply is finalised. */
        exp = class_export_get (req->rq_export);
        obd = exp->exp_obd;

        /* disable reply scheduling onto srv_reply_queue while I'm setting up */
        rs->rs_scheduled = 1;
        rs->rs_on_net    = 1;
        rs->rs_xid       = req->rq_xid;
        rs->rs_transno   = req->rq_transno;
        rs->rs_export    = exp;
        
        /* NOTE(review): interrupts are disabled by the irqsave here and
         * stay disabled across BOTH critical sections below — the plain
         * spin_unlock/spin_lock pair hands off from the obd lock to the
         * exp lock, and flags are only restored at the exp unlock.
         * Looks deliberate, but confirm against the lock documentation
         * before touching this sequence. */
        spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);

        if (rs->rs_transno > obd->obd_last_committed) {
                /* not committed already */ 
                list_add_tail (&rs->rs_obd_list, 
                               &obd->obd_uncommitted_replies);
        }

        spin_unlock (&obd->obd_uncommitted_replies_lock);
        spin_lock (&exp->exp_lock);

        list_add_tail (&rs->rs_exp_list, &exp->exp_outstanding_replies);

        spin_unlock_irqrestore (&exp->exp_lock, flags);

        netrc = target_send_reply_msg (req, rc, fail_id);

        spin_lock_irqsave (&svc->srv_lock, flags);

        svc->srv_n_difficult_replies++;

        if (netrc != 0) /* error sending: reply is off the net */
                rs->rs_on_net = 0;

        /* If the reply is already off the net, committed, or ACKed, hand
         * it to the service thread for finalisation; otherwise park it on
         * the active list and let the notifiers schedule it later. */
        if (!rs->rs_on_net ||                   /* some notifier */
            list_empty(&rs->rs_exp_list) ||     /* completed already */
            list_empty(&rs->rs_obd_list)) {
                list_add_tail (&rs->rs_list, &svc->srv_reply_queue);
                wake_up (&svc->srv_waitq);
        } else {
                list_add (&rs->rs_list, &sni->sni_active_replies);
                rs->rs_scheduled = 0;           /* allow notifier to schedule */
        }

        spin_unlock_irqrestore (&svc->srv_lock, flags);
}
1781
1782 int target_handle_ping(struct ptlrpc_request *req)
1783 {
1784         return lustre_pack_reply(req, 0, NULL, NULL);
1785 }