lustre/ldlm/ldlm_lib.c (fs/lustre-release.git)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Cluster File Systems, Inc.
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21
22 #ifndef EXPORT_SYMTAB
23 # define EXPORT_SYMTAB
24 #endif
25 #define DEBUG_SUBSYSTEM S_LDLM
26
27 #ifdef __KERNEL__
28 # include <linux/module.h>
29 #else
30 # include <liblustre.h>
31 #endif
32 #include <linux/obd.h>
33 #include <linux/obd_ost.h> /* for LUSTRE_OSC_NAME */
34 #include <linux/lustre_mds.h> /* for LUSTRE_MDC_NAME */
35 #include <linux/lustre_mgmt.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/lustre_net.h>
38 #include <linux/lustre_sec.h>
39 #include <linux/lustre_gs.h>
40
41 /* @priority: if non-zero, move the selected connection to the list head
42  * @nocreate: if non-zero, only search existing connections
43  */
44 static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
45                            int priority, int nocreate)
46 {
47         struct ptlrpc_connection *ptlrpc_conn;
48         struct obd_import_conn *imp_conn = NULL, *item;
49         int rc = 0;
50         ENTRY;
51
52         LASSERT(!(nocreate && !priority));
53
54         ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
55         if (!ptlrpc_conn) {
56                 CERROR("can't find connection %s\n", uuid->uuid);
57                 RETURN (-EINVAL);
58         }
59
60         if (!nocreate) {
61                 OBD_ALLOC(imp_conn, sizeof(*imp_conn));
62                 if (!imp_conn) {
63                         CERROR("failed to allocate memory\n");
64                         GOTO(out_put, rc = -ENOMEM);
65                 }
66         }
67
68         spin_lock(&imp->imp_lock);
69         list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
70                 if (obd_uuid_equals(uuid, &item->oic_uuid)) {
71                         if (priority) {
72                                 list_del(&item->oic_item);
73                                 list_add(&item->oic_item, &imp->imp_conn_list);
74                                 item->oic_last_attempt = 0;
75                         }
76                         CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
77                                imp, imp->imp_obd->obd_name, uuid->uuid,
78                                (priority ? ", move to head." : ""));
79                         spin_unlock(&imp->imp_lock);
80                         GOTO(out_free, rc = 0);
81                 }
82         }
83         /* not found */
84         if (!nocreate) {
85                 imp_conn->oic_conn = ptlrpc_conn;
86                 imp_conn->oic_uuid = *uuid;
87                 imp_conn->oic_last_attempt = 0;
88                 if (priority)
89                         list_add(&imp_conn->oic_item, &imp->imp_conn_list);
90                 else
91                         list_add_tail(&imp_conn->oic_item, &imp->imp_conn_list);
92                 CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
93                        imp, imp->imp_obd->obd_name, uuid->uuid,
94                        (priority ? "head" : "tail"));
95         } else
96                 rc = -ENOENT;
97
98         spin_unlock(&imp->imp_lock);
99         RETURN(0);
100 out_free:
101         if (imp_conn)
102                 OBD_FREE(imp_conn, sizeof(*imp_conn));
103 out_put:
104         ptlrpc_put_connection(ptlrpc_conn);
105         RETURN(rc);
106 }
107
108 int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
109 {
110         return import_set_conn(imp, uuid, 1, 1);
111 }
112
113 int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
114                            int priority)
115 {
116         return import_set_conn(imp, uuid, priority, 0);
117 }
118
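/* client_import_del_conn(): remove the connection with the given UUID from
 * the import's connection list.  The currently active connection can only
 * be removed while the import is CLOSED or DISCON, in which case its
 * references (and the matching DLM export connection) are dropped first.
 */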
119 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
120 {
121         struct obd_import_conn *imp_conn;
122         struct obd_export *dlmexp;
123         int rc = -ENOENT;
124         ENTRY;
125
126         spin_lock(&imp->imp_lock);
127         if (list_empty(&imp->imp_conn_list)) {
128                 LASSERT(!imp->imp_conn_current);
129                 LASSERT(!imp->imp_connection);
130                 GOTO(out, rc);
131         }
132
133         list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
134                 if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
135                         continue;
136                 LASSERT(imp_conn->oic_conn);
137
138                 /* is current conn? */
139                 if (imp_conn == imp->imp_conn_current) {
140                         LASSERT(imp_conn->oic_conn == imp->imp_connection);
141
142                         if (imp->imp_state != LUSTRE_IMP_CLOSED &&
143                             imp->imp_state != LUSTRE_IMP_DISCON) {
144                                 CERROR("can't remove current connection\n");
145                                 GOTO(out, rc = -EBUSY);
146                         }
147
148                         ptlrpc_put_connection(imp->imp_connection);
149                         imp->imp_connection = NULL;
150
151                         dlmexp = class_conn2export(&imp->imp_dlm_handle);
152                         if (dlmexp && dlmexp->exp_connection) {
153                                 LASSERT(dlmexp->exp_connection ==
154                                         imp_conn->oic_conn);
155                                 ptlrpc_put_connection(dlmexp->exp_connection);
156                                 dlmexp->exp_connection = NULL;
157                         }
158                 }
159
160                 list_del(&imp_conn->oic_item);
161                 ptlrpc_put_connection(imp_conn->oic_conn);
162                 OBD_FREE(imp_conn, sizeof(*imp_conn));
163                 CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
164                        imp, imp->imp_obd->obd_name, uuid->uuid);
165                 rc = 0;
166                 break;
167         }
168 out:
169         spin_unlock(&imp->imp_lock);
170         if (rc == -ENOENT)
171                 CERROR("connection %s not found\n", uuid->uuid);
172         RETURN(rc);
173 }
174
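/* client_obd_setup(): common setup for OSC/MDC/MGMTCLI/GKC client devices.
 * lcfg buffer 1 carries the target UUID and buffer 2 the server UUID; an
 * optional buffer 3 either marks the import "inactive" or names a mgmt
 * client.  The device type selects the request/reply portals and connect
 * opcode; the rest initializes client_obd state (dirty limits, LOI lists,
 * RPC histograms) and creates the import with its initial connection.
 */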
175 int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf)
176 {
177         struct lustre_cfg* lcfg = buf;
178         struct client_obd *cli = &obddev->u.cli;
179         struct obd_import *imp;
180         struct obd_uuid server_uuid;
181         int rq_portal, rp_portal, connect_op;
182         char *name = obddev->obd_type->typ_name;
183         char *mgmt_name = NULL;
184         int rc;
185         ENTRY;
186
187         /* In a more perfect world, we would hang a ptlrpc_client off of
188          * obd_type and just use the values from there. */
189         if (!strcmp(name, OBD_OSC_DEVICENAME)) {
190                 rq_portal = OST_REQUEST_PORTAL;
191                 rp_portal = OSC_REPLY_PORTAL;
192                 connect_op = OST_CONNECT;
193         } else if (!strcmp(name, OBD_MDC_DEVICENAME)) {
194                 rq_portal = MDS_REQUEST_PORTAL;
195                 rp_portal = MDC_REPLY_PORTAL;
196                 connect_op = MDS_CONNECT;
197         } else if (!strcmp(name, OBD_MGMTCLI_DEVICENAME)) {
198                 rq_portal = MGMT_REQUEST_PORTAL;
199                 rp_portal = MGMT_REPLY_PORTAL;
200                 connect_op = MGMT_CONNECT;
201         } else if (!strcmp(name, LUSTRE_GKC_NAME)) {
202                 rq_portal = GKS_REQUEST_PORTAL;
203                 rp_portal = GKC_REPLY_PORTAL;
204                 connect_op = GKS_CONNECT;
205
206         } else {
207                 CERROR("unknown client OBD type \"%s\", can't setup\n",
208                        name);
209                 RETURN(-EINVAL);
210         }
211
212
213         if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
214                 CERROR("requires a TARGET UUID\n");
215                 RETURN(-EINVAL);
216         }
217
218         if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
219                 CERROR("target UUID must be less than 38 characters\n");
220                 RETURN(-EINVAL);
221         }
222
223         if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
224                 CERROR("setup requires a SERVER UUID\n");
225                 RETURN(-EINVAL);
226         }
227
228         if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
229                 CERROR("server UUID must be less than 38 characters\n");
230                 RETURN(-EINVAL);
231         }
232
233         sema_init(&cli->cl_sem, 1);
234         cli->cl_conn_count = 0;
235         memcpy(server_uuid.uuid,  lustre_cfg_buf(lcfg, 2),
236                min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2), 
237                sizeof(server_uuid)));
238
239         cli->cl_dirty = 0;
240         cli->cl_avail_grant = 0;
241         
242         /* FIXME: should limit this for the sum of all cl_dirty_max */
243         cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
244         if (cli->cl_dirty_max >> PAGE_SHIFT > num_physpages / 8)
245                 cli->cl_dirty_max = num_physpages << (PAGE_SHIFT - 3);
246
247         INIT_LIST_HEAD(&cli->cl_cache_waiters);
248         INIT_LIST_HEAD(&cli->cl_loi_ready_list);
249         INIT_LIST_HEAD(&cli->cl_loi_write_list);
250         INIT_LIST_HEAD(&cli->cl_loi_read_list);
251         spin_lock_init(&cli->cl_loi_list_lock);
252         cli->cl_r_in_flight = 0;
253         cli->cl_w_in_flight = 0;
254         spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
255         spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
256         spin_lock_init(&cli->cl_read_page_hist.oh_lock);
257         spin_lock_init(&cli->cl_write_page_hist.oh_lock);
258
259         memset(&cli->cl_last_write_time, 0,
260                sizeof(cli->cl_last_write_time));
261         
262         cli->cl_cache_wait_num = 0;
263         cli->cl_cache_wait_sum = 0;
264         cli->cl_write_gap_sum = 0;
265         cli->cl_write_gaps = 0;
266         cli->cl_write_num = 0;
267         cli->cl_read_num = 0;
268
269         cli->cl_dirty_num = 0;
270         cli->cl_dirty_sum = 0;
271         cli->cl_dirty_av = 0;
272         cli->cl_sync_rpcs = 0;
273         cli->cl_dirty_dmax = 0;
274         cli->cl_dirty_dmin = 0;
275
276         if (num_physpages >> (20 - PAGE_SHIFT) <= 128) { /* <= 128 MB */
277                 cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES / 4;
278                 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT / 4;
279 #if 0
280         } else if (num_physpages >> (20 - PAGE_SHIFT) <= 512) { /* <= 512 MB */
281                 cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES / 2;
282                 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT / 2;
283 #endif
284         } else {
285                 cli->cl_max_pages_per_rpc = PTLRPC_MAX_BRW_PAGES;
286                 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
287         }
288
289         rc = ldlm_get_ref();
290         if (rc) {
291                 CERROR("ldlm_get_ref failed: %d\n", rc);
292                 GOTO(err, rc);
293         }
294
295         ptlrpc_init_client(rq_portal, rp_portal, name,
296                            &obddev->obd_ldlm_client);
297
298         imp = class_new_import();
299         if (imp == NULL) 
300                 GOTO(err_ldlm, rc = -ENOENT);
301         imp->imp_client = &obddev->obd_ldlm_client;
302         imp->imp_obd = obddev;
303         imp->imp_connect_op = connect_op;
304         imp->imp_generation = 0;
305         imp->imp_initial_recov = 1;
306         INIT_LIST_HEAD(&imp->imp_pinger_chain);
307         memcpy(imp->imp_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
308                LUSTRE_CFG_BUFLEN(lcfg, 1));
309         class_import_put(imp);
310
311         rc = client_import_add_conn(imp, &server_uuid, 1);
312         if (rc) {
313                 CERROR("can't add initial connection\n");
314                 GOTO(err_import, rc);
315         }
316
317         cli->cl_import = imp;
318         cli->cl_max_mds_easize = sizeof(struct lov_mds_md);
319         cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
320         cli->cl_sandev = to_kdev_t(0);
321
322         if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
323                 if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
324                         CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
325                                name, obddev->obd_name,
326                                imp->imp_target_uuid.uuid);
327                         imp->imp_invalid = 1;
328
329                         if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0)
330                                 mgmt_name = lustre_cfg_string(lcfg, 4);
331                 } else {
332                         mgmt_name = lustre_cfg_string(lcfg, 3);
333                 }
334         }
335 #if 0
336         if (mgmt_name != NULL) {
337                 /* Register with management client if we need to. */
338                 CDEBUG(D_HA, "%s registering with %s for events about %s\n",
339                        obddev->obd_name, mgmt_name, server_uuid.uuid);
340
341                 mgmt_obd = class_name2obd(mgmt_name);
342                 if (!mgmt_obd) {
343                         CERROR("can't find mgmtcli %s to register\n",
344                                mgmt_name);
345                         GOTO(err_import, rc = -ENOSYS);
346                 }
347
348                 register_f = (mgmtcli_register_for_events_t)symbol_get("mgmtcli_register_for_events");
349                 if (!register_f) {
350                         CERROR("can't i_m_g mgmtcli_register_for_events\n");
351                         GOTO(err_import, rc = -ENOSYS);
352                 }
353
354                 rc = register_f(mgmt_obd, obddev, &imp->imp_target_uuid);
355                 symbol_put("mgmtcli_register_for_events");
356
357                 if (!rc)
358                         cli->cl_mgmtcli_obd = mgmt_obd;
359         }
360 #endif
361         RETURN(rc);
362
363 err_import:
364         class_destroy_import(imp);
365 err_ldlm:
366         ldlm_put_ref(0);
367 err:
368         RETURN(rc);
369
370 }
371
372 int client_obd_cleanup(struct obd_device *obddev, int flags)
373 {
374         struct client_obd *cli = &obddev->u.cli;
375         ENTRY;
376
377         if (!cli->cl_import)
378                 RETURN(-EINVAL);
379
380         if (cli->cl_mgmtcli_obd) {
381                 mgmtcli_deregister_for_events_t dereg_f;
382
383                 dereg_f = (mgmtcli_deregister_for_events_t)symbol_get("mgmtcli_deregister_for_events");
384                 dereg_f(cli->cl_mgmtcli_obd, obddev);
385                 symbol_put("mgmtcli_deregister_for_events");
386         }
387
388         /* Here we drop the security structure after destroying the import,
389          * to avoid the issue of "sleep in spinlock".
390          */
391         class_import_get(cli->cl_import);
392         class_destroy_import(cli->cl_import);
393         ptlrpcs_import_drop_sec(cli->cl_import);
394         class_import_put(cli->cl_import);
395         cli->cl_import = NULL;
396
397         ldlm_put_ref(flags & OBD_OPT_FORCE);
398         RETURN(0);
399 }
400
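/* client_connect_import(): the first connect creates the client-side DLM
 * namespace, attaches a security context to the import, initializes it and
 * connects it to the server; subsequent connects only bump cl_conn_count.
 * On failure the namespace is freed and the export is disconnected again.
 */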
401 int client_connect_import(struct lustre_handle *dlm_handle,
402                           struct obd_device *obd,
403                           struct obd_uuid *cluuid,
404                           struct obd_connect_data *conn_data,
405                           unsigned long connect_flags)
406 {
407         struct client_obd *cli = &obd->u.cli;
408         struct obd_import *imp = cli->cl_import;
409         struct obd_export *exp;
410         int rc;
411         ENTRY;
412
413         down(&cli->cl_sem);
414         rc = class_connect(dlm_handle, obd, cluuid);
415         if (rc)
416                 GOTO(out_sem, rc);
417
418         cli->cl_conn_count++;
419         if (cli->cl_conn_count > 1)
420                 GOTO(out_sem, rc);
421         exp = class_conn2export(dlm_handle);
422
423         if (obd->obd_namespace != NULL)
424                 CERROR("already have namespace!\n");
425         obd->obd_namespace = ldlm_namespace_new(obd->obd_name,
426                                                 LDLM_NAMESPACE_CLIENT);
427         if (obd->obd_namespace == NULL)
428                 GOTO(out_disco, rc = -ENOMEM);
429
430         rc = ptlrpcs_import_get_sec(imp);
431         if (rc != 0)
432                 GOTO(out_ldlm, rc);
433
434         imp->imp_dlm_handle = *dlm_handle;
435         rc = ptlrpc_init_import(imp);
436         if (rc != 0) 
437                 GOTO(out_ldlm, rc);
438
439         imp->imp_connect_flags = connect_flags;
440         if (conn_data)
441                 memcpy(&imp->imp_connect_data, conn_data, sizeof(*conn_data));
442
443         rc = ptlrpc_connect_import(imp, NULL);
444         if (rc != 0) {
445                 LASSERT (imp->imp_state == LUSTRE_IMP_DISCON);
446                 GOTO(out_ldlm, rc);
447         }
448         LASSERT(exp->exp_connection);
449         ptlrpc_pinger_add_import(imp);
450         EXIT;
451
452         if (rc) {
453 out_ldlm:
454                 ldlm_namespace_free(obd->obd_namespace, 0);
455                 obd->obd_namespace = NULL;
456 out_disco:
457                 cli->cl_conn_count--;
458                 class_disconnect(exp, 0);
459         } else {
460                 class_export_put(exp);
461         }
462 out_sem:
463         up(&cli->cl_sem);
464         return rc;
465 }
466
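/* client_disconnect_export(): drop one connection reference; only the last
 * one tears down state - remove the import from the pinger, cancel and free
 * the DLM namespace, and either invalidate the import (obd_no_recov, i.e.
 * forced shutdown) or send a DISCONNECT request to the server.
 */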
467 int client_disconnect_export(struct obd_export *exp, unsigned long flags)
468 {
469         struct obd_device *obd = class_exp2obd(exp);
470         struct client_obd *cli = &obd->u.cli;
471         struct obd_import *imp;
472         int rc = 0, err;
473         ENTRY;
474
475         if (!obd) {
476                 CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
477                        exp, exp ? exp->exp_handle.h_cookie : -1);
478                 RETURN(-EINVAL);
479         }
480         imp = cli->cl_import;
481         down(&cli->cl_sem);
482         if (!cli->cl_conn_count) {
483                 CERROR("disconnecting disconnected device (%s)\n",
484                        obd->obd_name);
485                 GOTO(out_sem, rc = -EINVAL);
486         }
487
488         cli->cl_conn_count--;
489         if (cli->cl_conn_count)
490                 GOTO(out_no_disconnect, rc = 0);
491
492         /* Some non-replayable imports (MDS's OSCs) are pinged, so just
493          * delete it regardless.  (It's safe to delete an import that was
494          * never added.) */
495         (void)ptlrpc_pinger_del_import(imp);
496
497         if (obd->obd_namespace != NULL) {
498                 /* obd_no_recov == local only */
499                 ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
500                                        obd->obd_no_recov, NULL);
501                 ldlm_namespace_free(obd->obd_namespace, obd->obd_no_recov);
502                 obd->obd_namespace = NULL;
503         }
504
505         /* 
506          * Yeah, obd_no_recov also (mainly) means "forced shutdown".
507          */
508         if (obd->obd_no_recov)
509                 ptlrpc_invalidate_import(imp, 0);
510         else
511                 rc = ptlrpc_disconnect_import(imp);
512
513         EXIT;
514  out_no_disconnect:
515         err = class_disconnect(exp, 0);
516         if (!rc && err)
517                 rc = err;
518  out_sem:
519         up(&cli->cl_sem);
520         RETURN(rc);
521 }
522
523 /* --------------------------------------------------------------------------
524  * from old lib/target.c
525  * -------------------------------------------------------------------------- */
526
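/* target_handle_reconnect(): if the export already has a connection and
 * this is not an initial connect, a matching handle cookie means the same
 * client is reconnecting (positive EALREADY), while a cookie mismatch is
 * rejected with -EALREADY; otherwise the existing export handle is handed
 * back through @conn and 0 is returned.
 */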
527 int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
528                             struct obd_uuid *cluuid, int initial_conn)
529 {
530         if (exp->exp_connection && !initial_conn) {
531                 struct lustre_handle *hdl;
532                 hdl = &exp->exp_imp_reverse->imp_remote_handle;
533                 /* Might be a re-connect after a partition. */
534                 if (!memcmp(&conn->cookie, &hdl->cookie, sizeof conn->cookie)) {
535                         CERROR("%s reconnecting\n", cluuid->uuid);
536                         conn->cookie = exp->exp_handle.h_cookie;
537                         RETURN(EALREADY);
538                 } else {
539                         CERROR("%s reconnecting from %s, "
540                                "handle mismatch (ours "LPX64", theirs "
541                                LPX64")\n", cluuid->uuid,
542                                exp->exp_connection->c_remote_uuid.uuid,
543                                hdl->cookie, conn->cookie);
544                         memset(conn, 0, sizeof *conn);
545                         RETURN(-EALREADY);
546                 }
547         }
548
549         conn->cookie = exp->exp_handle.h_cookie;
550         CDEBUG(D_INFO, "existing export for UUID '%s' at %p\n",
551                cluuid->uuid, exp);
552         CDEBUG(D_IOCTL,"connect: cookie "LPX64"\n", conn->cookie);
553         RETURN(0);
554 }
555
556 static inline int ptlrpc_peer_is_local(struct ptlrpc_peer *peer)
557 {
558         ptl_process_id_t myid;
559
560         PtlGetId(peer->peer_ni->pni_ni_h, &myid);
561         return (memcmp(&peer->peer_id, &myid, sizeof(myid)) == 0);
562 }
563
564 /* Check whether the given flavor is in the deny list.
565  * Return value:
566  *      0           not found, pass
567  *      -EPERM      found, refuse
568  */
569
570 static int check_deny_list(struct list_head *head, __u32 flavor)
571 {
572         deny_sec_t *p_deny_sec = NULL;
573         deny_sec_t *n_deny_sec = NULL;
574
575         list_for_each_entry_safe(p_deny_sec, n_deny_sec, head, list) {
576                 if (p_deny_sec->flavor == flavor)
577                         return -EPERM;
578         }
579         return 0;
580 }
581
582 int target_check_deny_sec(struct obd_device *target, struct ptlrpc_request *req)
583 {
584         __u32 flavor;
585         int rc = 0;
586
587         flavor = req->rq_req_secflvr;
588
589         if (!strcmp(target->obd_type->typ_name, OBD_MDS_DEVICENAME)) {
590                 spin_lock(&target->u.mds.mds_denylist_lock);
591                 rc = check_deny_list(&target->u.mds.mds_denylist, flavor);
592                 spin_unlock(&target->u.mds.mds_denylist_lock);
593         } else if (!strcmp(target->obd_type->typ_name, OBD_FILTER_DEVICENAME)) {
594                 spin_lock(&target->u.filter.fo_denylist_lock);
595                 rc = check_deny_list(&target->u.filter.fo_denylist, flavor);
596                 spin_unlock(&target->u.filter.fo_denylist_lock);
597         }
598
599         return rc;
600 }
601
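/* target_handle_connect(): server-side connect/reconnect handler.  It looks
 * up the target device by UUID, checks the security deny list, finds or
 * creates an export for the client UUID, uses conn_cnt to reject stale or
 * racing connects, advertises recovery and replayable flags in the reply,
 * and finally rebuilds the reverse import used for callbacks to the client.
 */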
602 int target_handle_connect(struct ptlrpc_request *req)
603 {
604         unsigned long connect_flags = 0, *cfp;
605         struct obd_device *target;
606         struct obd_export *export = NULL;
607         struct obd_import *revimp;
608         struct lustre_handle conn;
609         struct obd_uuid tgtuuid;
610         struct obd_uuid cluuid;
611         struct obd_uuid remote_uuid;
612         struct list_head *p;
613         struct obd_connect_data *conn_data;
614         int conn_data_size = sizeof(*conn_data);
615         char *str, *tmp;
616         int rc = 0;
617         unsigned long flags;
618         int initial_conn = 0;
619         char peer_str[PTL_NALFMT_SIZE];
620         const int offset = 1;
621         ENTRY;
622
623         OBD_RACE(OBD_FAIL_TGT_CONN_RACE); 
624
625         LASSERT_REQSWAB (req, offset + 0);
626         str = lustre_msg_string(req->rq_reqmsg, offset + 0,
627                                 sizeof(tgtuuid) - 1);
628         if (str == NULL) {
629                 CERROR("bad target UUID for connect\n");
630                 GOTO(out, rc = -EINVAL);
631         }
632
633         obd_str2uuid (&tgtuuid, str);
634         target = class_uuid2obd(&tgtuuid);
635         if (!target)
636                 target = class_name2obd(str);
637         
638         if (!target || target->obd_stopping || !target->obd_set_up) {
639                 CERROR("UUID '%s' is not available for connect from %s\n",
640                        str, req->rq_peerstr);
641                 GOTO(out, rc = -ENODEV);
642         }
643
644         /* check the secure deny list of mds/ost */
645         rc = target_check_deny_sec(target, req);
646         if (rc != 0)
647                 GOTO(out, rc);
648
649         LASSERT_REQSWAB (req, offset + 1);
650         str = lustre_msg_string(req->rq_reqmsg, offset + 1, sizeof(cluuid) - 1);
651         if (str == NULL) {
652                 CERROR("bad client UUID for connect\n");
653                 GOTO(out, rc = -EINVAL);
654         }
655
656         obd_str2uuid (&cluuid, str);
657
658         /* XXX extract a nettype and format accordingly */
659         switch (sizeof(ptl_nid_t)) {
660                 /* NB the casts only avoid compiler warnings */
661         case 8:
662                 snprintf((char *)remote_uuid.uuid, sizeof(remote_uuid),
663                          "NET_"LPX64"_UUID", (__u64)req->rq_peer.peer_id.nid);
664                 break;
665         case 4:
666                 snprintf((char *)remote_uuid.uuid, sizeof(remote_uuid),
667                          "NET_%x_UUID", (__u32)req->rq_peer.peer_id.nid);
668                 break;
669         default:
670                 LBUG();
671         }
672
673         tmp = lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof(conn));
674         if (tmp == NULL)
675                 GOTO(out, rc = -EPROTO);
676
677         memcpy(&conn, tmp, sizeof conn);
678
679         cfp = lustre_msg_buf(req->rq_reqmsg, offset + 3, sizeof(unsigned long));
680         LASSERT(cfp != NULL);
681         connect_flags = *cfp;
682
683         conn_data = lustre_swab_reqbuf(req, offset + 4, sizeof(*conn_data),
684                                        lustre_swab_connect);
685         if (!conn_data)
686                 GOTO(out, rc = -EPROTO);
687
688         rc = lustre_pack_reply(req, 1, &conn_data_size, NULL);
689         if (rc)
690                 GOTO(out, rc);
691         
692         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_INITIAL)
693                 initial_conn = 1;
694         
695         /* lctl gets a backstage, all-access pass. */
696         if (obd_uuid_equals(&cluuid, &target->obd_uuid))
697                 goto dont_check_exports;
698
699         spin_lock(&target->obd_dev_lock);
700         list_for_each(p, &target->obd_exports) {
701                 export = list_entry(p, struct obd_export, exp_obd_chain);
702                 if (obd_uuid_equals(&cluuid, &export->exp_client_uuid)) {
703                         spin_unlock(&target->obd_dev_lock);
704                         LASSERT(export->exp_obd == target);
705
706                         rc = target_handle_reconnect(&conn, export, &cluuid,
707                                                      initial_conn);
708                         break;
709                 }
710                 export = NULL;
711         }
712         /* If we found an export, we already unlocked. */
713         if (!export) {
714                 spin_unlock(&target->obd_dev_lock);
715         } else if (req->rq_export == NULL && 
716                    atomic_read(&export->exp_rpc_count) > 0) {
717                 CWARN("%s: refuse connection from %s/%s to 0x%p/%d\n",
718                       target->obd_name, cluuid.uuid,
719                       ptlrpc_peernid2str(&req->rq_peer, peer_str),
720                       export, atomic_read(&export->exp_refcount));
721                 GOTO(out, rc = -EBUSY);
722         } else if (req->rq_export != NULL &&
723                    atomic_read(&export->exp_rpc_count) > 1) {
724                 CWARN("%s: refuse reconnection from %s@%s to 0x%p/%d\n",
725                       target->obd_name, cluuid.uuid,
726                       ptlrpc_peernid2str(&req->rq_peer, peer_str),
727                       export, atomic_read(&export->exp_rpc_count));
728                 GOTO(out, rc = -EBUSY);
729         } else if (req->rq_reqmsg->conn_cnt == 1 && !initial_conn) {
730                 CERROR("%s reconnected with 1 conn_cnt; cookies not random?\n",
731                        cluuid.uuid);
732                 GOTO(out, rc = -EALREADY);
733         }
734
735         /* Tell the client if we're in recovery. */
736         /* If this is the first client, start the recovery timer */
737         CWARN("%s: connection from %s@%s/%lu %st"LPU64"\n", target->obd_name,
738               cluuid.uuid, ptlrpc_peernid2str(&req->rq_peer, peer_str), *cfp,
739               target->obd_recovering ? "recovering/" : "", conn_data->transno);
740
741         if (target->obd_recovering) {
742                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECOVERING);
743                 target_start_recovery_timer(target);
744         }
745
746 #if 0
747         /* Tell the client if we support replayable requests */
748         if (target->obd_replayable)
749                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
750 #endif
751
752         if (export == NULL) {
753                 if (target->obd_recovering) {
754                         CERROR("%s denying connection for new client %s@%s: "
755                                "%d clients in recovery for %lds\n", target->obd_name, 
756                                cluuid.uuid,
757                                ptlrpc_peernid2str(&req->rq_peer, peer_str),
758                                target->obd_recoverable_clients,
759                                (target->obd_recovery_timer.expires-jiffies)/HZ);
760                         rc = -EBUSY;
761                 } else {
762  dont_check_exports:
763                         rc = obd_connect(&conn, target, &cluuid, conn_data,
764                                          connect_flags);
765                 }
766         }
767
768         /* Return only the parts of obd_connect_data that we understand, so the
769          * client knows that we don't understand the rest. */
770         conn_data->ocd_connect_flags &= OBD_CONNECT_SUPPORTED;
771         memcpy(lustre_msg_buf(req->rq_repmsg, 0, sizeof(*conn_data)), conn_data,
772                sizeof(*conn_data));
773
774         /* Tell the client if we support replayable requests */
775         if (target->obd_replayable)
776                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_REPLAYABLE);
777
778         /* If all else goes well, this is our RPC return code. */
779         req->rq_status = 0;
780
781         if (rc && rc != EALREADY)
782                 GOTO(out, rc);
783
784         req->rq_repmsg->handle = conn;
785
786         /* If the client and the server are the same node, we will already
787          * have an export that really points to the client's DLM export,
788          * because we have a shared handles table.
789          *
790          * XXX this will go away when shaver stops sending the "connect" handle
791          * in the real "remote handle" field of the request --phik 24 Apr 2003
792          */
793         if (req->rq_export != NULL)
794                 class_export_put(req->rq_export);
795
796         /* ownership of this export ref transfers to the request */
797         export = req->rq_export = class_conn2export(&conn);
798         LASSERT(export != NULL);
799
800         spin_lock_irqsave(&export->exp_lock, flags);
801         if (initial_conn) {
802                 req->rq_repmsg->conn_cnt = export->exp_conn_cnt + 1;
803         } else if (export->exp_conn_cnt >= req->rq_reqmsg->conn_cnt) {
804                 CERROR("%s@%s: already connected at a higher conn_cnt: %d > %d\n",
805                        cluuid.uuid, ptlrpc_peernid2str(&req->rq_peer, peer_str),
806                        export->exp_conn_cnt, 
807                        req->rq_reqmsg->conn_cnt);
808                 spin_unlock_irqrestore(&export->exp_lock, flags);
809                 GOTO(out, rc = -EALREADY);
810         } 
811         export->exp_conn_cnt = req->rq_reqmsg->conn_cnt;
812         spin_unlock_irqrestore(&export->exp_lock, flags);
813
814         /* request from liblustre? */
815         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT)
816                 export->exp_libclient = 1;
817
818         if (!(lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_ASYNC) &&
819             ptlrpc_peer_is_local(&req->rq_peer)) {
820                 CWARN("%s: exp %p set sync\n", target->obd_name, export);
821                 export->exp_sync = 1;
822         } else {
823                 CDEBUG(D_HA, "%s: exp %p set async\n",target->obd_name,export);
824                 export->exp_sync = 0;
825         }
826
827         if (export->exp_connection != NULL)
828                 ptlrpc_put_connection(export->exp_connection);
829         export->exp_connection = ptlrpc_get_connection(&req->rq_peer,
830                                                        &remote_uuid);
831
832         if (rc == EALREADY) {
833                 /* We indicate the reconnection in a flag, not an error code. */
834                 lustre_msg_add_op_flags(req->rq_repmsg, MSG_CONNECT_RECONNECT);
835                 GOTO(out, rc = 0);
836         }
837
838         spin_lock_bh(&target->obd_processing_task_lock);
839         if (target->obd_recovering && export->exp_connected == 0) {
840                 __u64 t = conn_data->transno;
841                 export->exp_connected = 1;
842                 if ((lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_TRANSNO)
843                                 && t < target->obd_next_recovery_transno)
844                         target->obd_next_recovery_transno = t;
845                 target->obd_connected_clients++;
846                 if (target->obd_connected_clients == target->obd_max_recoverable_clients)
847                         wake_up(&target->obd_next_transno_waitq);
848         }
849         spin_unlock_bh(&target->obd_processing_task_lock);
850
851         memcpy(&conn, lustre_msg_buf(req->rq_reqmsg, offset + 2, sizeof(conn)),
852                sizeof(conn));
853
854         if (export->exp_imp_reverse != NULL) {
855                 /* same logic as client_obd_cleanup */
856                 class_import_get(export->exp_imp_reverse);
857                 class_destroy_import(export->exp_imp_reverse);
858                 ptlrpcs_import_drop_sec(export->exp_imp_reverse);
859                 class_import_put(export->exp_imp_reverse);
860         }
861
862         /* for the remaining part, we return -ENOTCONN in case of errors
863          * in order to let the client initialize the connection again.
864          */
865         revimp = export->exp_imp_reverse = class_new_import();
866         if (!revimp) {
867                 CERROR("failed to allocate new reverse import\n");
868                 GOTO(out, rc = -ENOTCONN);
869         }
870
871         revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
872         revimp->imp_client = &export->exp_obd->obd_ldlm_client;
873         revimp->imp_remote_handle = conn;
874         revimp->imp_obd = target;
875         revimp->imp_dlm_fake = 1;
876         revimp->imp_state = LUSTRE_IMP_FULL;
877
878         rc = ptlrpcs_import_get_sec(revimp);
879         if (rc) {
880                 CERROR("reverse import cannot get sec: %d\n", rc);
881                 class_destroy_import(revimp);
882                 export->exp_imp_reverse = NULL;
883                 GOTO(out, rc = -ENOTCONN);
884         }
885
886         class_import_put(revimp);
887
888         rc = obd_connect_post(export, initial_conn, connect_flags);
889 out:
890         if (rc)
891                 req->rq_status = rc;
892         RETURN(rc);
893 }
894
895 int target_handle_disconnect(struct ptlrpc_request *req)
896 {
897         struct obd_export *exp;
898         int rc;
899         ENTRY;
900
901         rc = lustre_pack_reply(req, 0, NULL, NULL);
902         if (rc)
903                 RETURN(rc);
904
905         /* keep the rq_export around so we can send the reply */
906         exp = class_export_get(req->rq_export);
907         req->rq_status = obd_disconnect(exp, 0);
908         RETURN(0);
909 }
910
911 void target_destroy_export(struct obd_export *exp)
912 {
913         /* exports created from last_rcvd data, and "fake"
914            exports created by lctl don't have an import */
915         if (exp->exp_imp_reverse != NULL) {
916                 ptlrpcs_import_drop_sec(exp->exp_imp_reverse);
917                 class_destroy_import(exp->exp_imp_reverse);
918         }
919
920         /* We cancel locks at disconnect time, but this will catch any locks
921          * granted in a race with recovery-induced disconnect. */
922         if (exp->exp_obd->obd_namespace != NULL)
923                 ldlm_cancel_locks_for_export(exp);
924 }
925
926 /*
927  * Recovery functions
928  */
929
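/* ptlrpc_clone_req() copies a request and its message buffer so the request
 * can be queued for later replay; the clone takes over the reply state and
 * service security data and keeps its own export reference.
 * ptlrpc_free_clone() releases everything the clone holds.
 */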
930 struct ptlrpc_request *
931 ptlrpc_clone_req( struct ptlrpc_request *orig_req) 
932 {
933         struct ptlrpc_request *copy_req;
934         struct lustre_msg *copy_reqmsg;
935
936         OBD_ALLOC(copy_req, sizeof *copy_req);
937         if (!copy_req)
938                 return NULL;
939         OBD_ALLOC(copy_reqmsg, orig_req->rq_reqlen);
940         if (!copy_reqmsg){
941                 OBD_FREE(copy_req, sizeof *copy_req);
942                 return NULL;
943         }
944
945         memcpy(copy_req, orig_req, sizeof *copy_req);
946         memcpy(copy_reqmsg, orig_req->rq_reqmsg, orig_req->rq_reqlen);
947         /* the copied req takes over the reply state and security data */
948         orig_req->rq_reply_state = NULL;
949         orig_req->rq_svcsec_data = NULL;
950
951         copy_req->rq_reqmsg = copy_reqmsg;
952         class_export_get(copy_req->rq_export);
953         INIT_LIST_HEAD(&copy_req->rq_list);
954
955         return copy_req;
956 }
957
958 void ptlrpc_free_clone( struct ptlrpc_request *req) 
959 {
960         if (req->rq_svcsec)
961                 svcsec_cleanup_req(req);
962
963         class_export_put(req->rq_export);
964         list_del(&req->rq_list);
965         OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
966         OBD_FREE(req, sizeof *req);
967 }
968
969 static void target_release_saved_req(struct ptlrpc_request *req)
970 {
971         if (req->rq_svcsec)
972                 svcsec_cleanup_req(req);
973
974         class_export_put(req->rq_export);
975         OBD_FREE(req->rq_reqmsg, req->rq_reqlen);
976         OBD_FREE(req, sizeof *req);
977 }
978
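/* target_finish_recovery(): reprocess the DLM namespace so queued locks can
 * be granted, invoke the obd type's postrecov method to clean up orphans,
 * and record the time recovery ended.
 */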
979 static void target_finish_recovery(struct obd_device *obd)
980 {
981         int rc;
982
983         ldlm_reprocess_all_ns(obd->obd_namespace);
984
985         /* when recovery is finished, clean up orphans on the MDS and OST */
986         if (OBT(obd) && OBP(obd, postrecov)) {
987                 rc = OBP(obd, postrecov)(obd);
988                 if (rc >= 0)
989                         CWARN("%s: all clients recovered, %d MDS "
990                               "orphans deleted\n", obd->obd_name, rc);
991                 else
992                         CERROR("postrecov failed %d\n", rc);
993         }
994
995         obd->obd_recovery_end = LTIME_S(CURRENT_TIME);
996         return;
997 }
998
999 static void abort_req_replay_queue(struct obd_device *obd)
1000 {
1001         struct ptlrpc_request *req;
1002         struct list_head *tmp, *n;
1003         int rc;
1004
1005         list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
1006                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1007                 list_del(&req->rq_list);
1008                 DEBUG_REQ(D_ERROR, req, "aborted:");
1009                 req->rq_status = -ENOTCONN;
1010                 req->rq_type = PTL_RPC_MSG_ERR;
1011                 rc = lustre_pack_reply(req, 0, NULL, NULL);
1012                 if (rc == 0) {
1013                         ptlrpc_reply(req);
1014                 } else {
1015                         DEBUG_REQ(D_ERROR, req,
1016                                   "packing failed for abort-reply; skipping");
1017                 }
1018                 target_release_saved_req(req);
1019         }
1020 }
1021
1022 static void abort_lock_replay_queue(struct obd_device *obd)
1023 {
1024         struct ptlrpc_request *req;
1025         struct list_head *tmp, *n;
1026         int rc;
1027
1028         list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
1029                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1030                 list_del(&req->rq_list);
1031                 DEBUG_REQ(D_ERROR, req, "aborted:");
1032                 req->rq_status = -ENOTCONN;
1033                 req->rq_type = PTL_RPC_MSG_ERR;
1034                 rc = lustre_pack_reply(req, 0, NULL, NULL);
1035                 if (rc == 0) {
1036                         ptlrpc_reply(req);
1037                 } else {
1038                         DEBUG_REQ(D_ERROR, req,
1039                                   "packing failed for abort-reply; skipping");
1040                 }
1041                 target_release_saved_req(req);
1042         }
1043 }
1044
1045 /* Called from a cleanup function if the device is being cleaned up
1046    forcefully.  The exports should all have been disconnected already;
1047    the only things left to do are:
1048      - clear the recovery flags
1049      - cancel the timer
1050      - free queued requests and replies, but don't send replies.
1051    Because the obd_stopping flag is set, no new requests should be
1052    received.
1053 */
1054 void target_cleanup_recovery(struct obd_device *obd)
1055 {
1056         struct list_head *tmp, *n;
1057         struct ptlrpc_request *req;
1058
1059         spin_lock_bh(&obd->obd_processing_task_lock);
1060         if (!obd->obd_recovering) {
1061                 spin_unlock_bh(&obd->obd_processing_task_lock);
1062                 EXIT;
1063                 return;
1064         }
1065         obd->obd_recovering = obd->obd_abort_recovery = 0;
1066         target_cancel_recovery_timer(obd);
1067         spin_unlock_bh(&obd->obd_processing_task_lock);
1068
1069         list_for_each_safe(tmp, n, &obd->obd_req_replay_queue) {
1070                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1071                 list_del(&req->rq_list);
1072                 LASSERT (req->rq_reply_state == 0);
1073                 target_release_saved_req(req);
1074         }
1075         list_for_each_safe(tmp, n, &obd->obd_lock_replay_queue) {
1076                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1077                 list_del(&req->rq_list);
1078                 LASSERT (req->rq_reply_state == 0);
1079                 target_release_saved_req(req);
1080         }
1081         list_for_each_safe(tmp, n, &obd->obd_final_req_queue) {
1082                 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1083                 list_del(&req->rq_list);
1084                 LASSERT (req->rq_reply_state == 0);
1085                 target_release_saved_req(req);
1086         }
1087 }
1088
1089 #if 0
1090 static void target_abort_recovery(void *data)
1091 {
1092         struct obd_device *obd = data;
1093
1094         LASSERT(!obd->obd_recovering);
1095
1096         class_disconnect_stale_exports(obd, 0);
1097
1098         CERROR("%s: recovery period over; disconnecting unfinished clients.\n",
1099                obd->obd_name);
1100
1101         abort_recovery_queue(obd);
1102         target_finish_recovery(obd);
1103         ptlrpc_run_recovery_over_upcall(obd);
1104 }
1105 #endif
1106
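/* Recovery timer helpers: target_recovery_expired() flags recovery as
 * aborted and wakes the recovery thread, reset_recovery_timer() re-arms the
 * deadline OBD_RECOVERY_TIMEOUT into the future while recovery is still in
 * progress, and target_start_recovery_timer() arms the timer only once,
 * when the first client connects.
 */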
1107 static void target_recovery_expired(unsigned long castmeharder)
1108 {
1109         struct obd_device *obd = (struct obd_device *)castmeharder;
1110         spin_lock_bh(&obd->obd_processing_task_lock);
1111         if (obd->obd_recovering)
1112                 obd->obd_abort_recovery = 1;
1113
1114         wake_up(&obd->obd_next_transno_waitq);
1115         spin_unlock_bh(&obd->obd_processing_task_lock);
1116 }
1117
1118
1119 /* obd_processing_task_lock should be held */
1120 void target_cancel_recovery_timer(struct obd_device *obd)
1121 {
1122         CDEBUG(D_HA, "%s: cancel recovery timer\n", obd->obd_name);
1123         del_timer(&obd->obd_recovery_timer);
1124 }
1125
1126 #ifdef __KERNEL__
1127 static void reset_recovery_timer(struct obd_device *obd)
1128 {
1129         spin_lock_bh(&obd->obd_processing_task_lock);
1130         if (!obd->obd_recovering) {
1131                 spin_unlock_bh(&obd->obd_processing_task_lock);
1132                 return;
1133         }                
1134         CDEBUG(D_HA, "timer will expire in %u seconds\n",
1135                OBD_RECOVERY_TIMEOUT / HZ);
1136         mod_timer(&obd->obd_recovery_timer, jiffies + OBD_RECOVERY_TIMEOUT);
1137         spin_unlock_bh(&obd->obd_processing_task_lock);
1138 }
1139 #endif
1140
1141 /* Only start it the first time called */
1142 void target_start_recovery_timer(struct obd_device *obd)
1143 {
1144         spin_lock_bh(&obd->obd_processing_task_lock);
1145         if (!obd->obd_recovering || timer_pending(&obd->obd_recovery_timer)) {
1146                 spin_unlock_bh(&obd->obd_processing_task_lock);
1147                 return;
1148         }
1149         CWARN("%s: starting recovery timer (%us)\n", obd->obd_name,
1150                OBD_RECOVERY_TIMEOUT / HZ);
1151         obd->obd_recovery_timer.function = target_recovery_expired;
1152         obd->obd_recovery_timer.data = (unsigned long)obd;
1153         mod_timer(&obd->obd_recovery_timer, jiffies + OBD_RECOVERY_TIMEOUT);
1154         spin_unlock_bh(&obd->obd_processing_task_lock);
1155 }
1156
1157 #ifdef __KERNEL__
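/* check_for_next_transno(): wake-up predicate for the request replay wait.
 * Wake when recovery is aborted, when all request replay clients are done,
 * when the head of the replay queue carries exactly the next expected
 * transno, or when all expected requests are queued so that a missing
 * transno can either be skipped or cause recovery to abort.
 */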
1158 static int check_for_next_transno(struct obd_device *obd)
1159 {
1160         struct ptlrpc_request *req = NULL;
1161         int wake_up = 0, connected, completed, queue_len, max;
1162         __u64 next_transno, req_transno;
1163
1164         spin_lock_bh(&obd->obd_processing_task_lock);
1165         if (!list_empty(&obd->obd_req_replay_queue)) {
1166                 req = list_entry(obd->obd_req_replay_queue.next,
1167                                  struct ptlrpc_request, rq_list);
1168                 req_transno = req->rq_reqmsg->transno;
1169         } else {
1170                 req_transno = 0;
1171         }
1172
1173         max = obd->obd_max_recoverable_clients;
1174         connected = obd->obd_connected_clients;
1175         completed = max - obd->obd_recoverable_clients;
1176         queue_len = obd->obd_requests_queued_for_recovery;
1177         next_transno = obd->obd_next_recovery_transno;
1178
1179         CDEBUG(D_HA,"max: %d, connected: %d, completed: %d, queue_len: %d, "
1180                "req_transno: "LPU64", next_transno: "LPU64"\n",
1181                max, connected, completed, queue_len, req_transno, next_transno);
1182         if (obd->obd_abort_recovery) {
1183                 CDEBUG(D_HA, "waking for aborted recovery\n");
1184                 wake_up = 1;
1185         } else if (atomic_read(&obd->obd_req_replay_clients) == 0) {
1186                 CDEBUG(D_HA, "waking for completed recovery\n");
1187                 wake_up = 1;
1188         } else if (req_transno == next_transno) {
1189                 CDEBUG(D_HA, "waking for next ("LPD64")\n", next_transno);
1190                 wake_up = 1;
1191         } else if (queue_len + completed == max) {
1192                 LASSERT(req->rq_reqmsg->transno >= next_transno);
1193                 CDEBUG(req_transno > obd->obd_last_committed ? D_ERROR : D_HA,
1194                        "waking for skipped transno (skip: "LPD64
1195                        ", ql: %d, comp: %d, conn: %d, next: "LPD64")\n",
1196                        next_transno, queue_len, completed, max, req_transno);
1197                 obd->obd_next_recovery_transno = req_transno;
1198                 wake_up = 1;
1199         } else if (queue_len == atomic_read(&obd->obd_req_replay_clients)) {
1200                 /* some clients haven't connected in time, but we can try
1201                  * to replay requests that depend on already committed ones;
1202                  * also, we can replay the first non-committed transaction */
1203                 LASSERT(req_transno != 0);
1204                 if (req_transno == obd->obd_last_committed + 1) {
1205                         obd->obd_next_recovery_transno = req_transno;
1206                 } else if (req_transno > obd->obd_last_committed) {
1207                         /* can't continue recovery: have no needed transno */
1208                         obd->obd_abort_recovery = 1;
1209                         CDEBUG(D_ERROR, "abort due to missed clients. max: %d, "
1210                                "connected: %d, completed: %d, queue_len: %d, "
1211                                "req_transno: "LPU64", next_transno: "LPU64"\n",
1212                                max, connected, completed, queue_len,
1213                                req_transno, next_transno);
1214                 }
1215                 wake_up = 1;
1216         }
1217         spin_unlock_bh(&obd->obd_processing_task_lock);
1218         
1219         return wake_up;
1220 }
1221
1222 static struct ptlrpc_request *
1223 target_next_replay_req(struct obd_device *obd)
1224 {
1225         struct l_wait_info lwi = { 0 };
1226         struct ptlrpc_request *req;
1227
1228         CDEBUG(D_HA, "Waiting for transno "LPD64"\n",
1229                obd->obd_next_recovery_transno);
1230         l_wait_event(obd->obd_next_transno_waitq,
1231                      check_for_next_transno(obd), &lwi);
1232         
1233         spin_lock_bh(&obd->obd_processing_task_lock);
1234         if (obd->obd_abort_recovery) {
1235                 req = NULL;
1236         } else if (!list_empty(&obd->obd_req_replay_queue)) {
1237                 req = list_entry(obd->obd_req_replay_queue.next,
1238                                  struct ptlrpc_request, rq_list);
1239                 list_del_init(&req->rq_list);
1240                 obd->obd_requests_queued_for_recovery--;
1241         } else {
1242                 req = NULL;
1243         }
1244         spin_unlock_bh(&obd->obd_processing_task_lock);
1245         return req;
1246 }
1247
1248 static int check_for_next_lock(struct obd_device *obd)
1249 {
1250         struct ptlrpc_request *req = NULL;
1251         int wake_up = 0;
1252
1253         spin_lock_bh(&obd->obd_processing_task_lock);
1254         if (!list_empty(&obd->obd_lock_replay_queue)) {
1255                 req = list_entry(obd->obd_lock_replay_queue.next,
1256                                  struct ptlrpc_request, rq_list);
1257                 CDEBUG(D_HA, "waking for next lock\n");
1258                 wake_up = 1;
1259         } else if (atomic_read(&obd->obd_lock_replay_clients) == 0) {
1260                 CDEBUG(D_HA, "waking for completed lock replay\n");
1261                 wake_up = 1;
1262         } else if (obd->obd_abort_recovery) {
1263                 CDEBUG(D_HA, "waking for aborted recovery\n");
1264                 wake_up = 1;
1265         }
1266         spin_unlock_bh(&obd->obd_processing_task_lock);
1267         
1268         return wake_up;
1269 }
1270
1271 static struct ptlrpc_request *
1272 target_next_replay_lock(struct obd_device *obd)
1273 {
1274         struct l_wait_info lwi = { 0 };
1275         struct ptlrpc_request *req;
1276
1277         CDEBUG(D_HA, "Waiting for lock\n");
1278         l_wait_event(obd->obd_next_transno_waitq,
1279                      check_for_next_lock(obd), &lwi);
1280         
1281         spin_lock_bh(&obd->obd_processing_task_lock);
1282         if (obd->obd_abort_recovery) {
1283                 req = NULL;
1284         } else if (!list_empty(&obd->obd_lock_replay_queue)) {
1285                 req = list_entry(obd->obd_lock_replay_queue.next,
1286                                  struct ptlrpc_request, rq_list);
1287                 list_del_init(&req->rq_list);
1288         } else {
1289                 req = NULL;
1290         }
1291         spin_unlock_bh(&obd->obd_processing_task_lock);
1292         return req;
1293 }
1294
1295 static struct ptlrpc_request *
1296 target_next_final_ping(struct obd_device *obd)
1297 {
1298         struct ptlrpc_request *req;
1299
1300         spin_lock_bh(&obd->obd_processing_task_lock);
1301         if (!list_empty(&obd->obd_final_req_queue)) {
1302                 req = list_entry(obd->obd_final_req_queue.next,
1303                                  struct ptlrpc_request, rq_list);
1304                 list_del_init(&req->rq_list);
1305         } else {
1306                 req = NULL;
1307         }
1308         spin_unlock_bh(&obd->obd_processing_task_lock);
1309         return req;
1310 }
1311
1312 static int req_replay_done(struct obd_export *exp)
1313 {
1314         if (exp->exp_req_replay_needed)
1315                 return 0;
1316         return 1;
1317 }
1318
1319 static int lock_replay_done(struct obd_export *exp)
1320 {
1321         if (exp->exp_lock_replay_needed)
1322                 return 0;
1323         return 1;
1324 }
1325
1326 static int connect_done(struct obd_export *exp)
1327 {
1328         if (exp->exp_connected)
1329                 return 1;
1330         return 0;
1331 }
1332
1333 static int check_for_clients(struct obd_device *obd)
1334 {
1335         if (obd->obd_abort_recovery)
1336                 return 1;
1337         LASSERT(obd->obd_connected_clients <= obd->obd_max_recoverable_clients);
1338         if (obd->obd_connected_clients == obd->obd_max_recoverable_clients)
1339                 return 1;
1340         return 0;
1341 }
1342
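/* target_recovery_thread(): drives recovery in three stages.  It waits for
 * the known clients to reconnect (evicting those that miss the window),
 * replays queued requests in transno order, replays locks, and finally
 * answers the queued completion pings once the recovering flag is cleared.
 */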
1343 static int target_recovery_thread(void *arg)
1344 {
1345         struct obd_device *obd = arg;
1346         struct ptlrpc_request *req;
1347         struct target_recovery_data *trd = &obd->obd_recovery_data;
1348         char peer_str[PTL_NALFMT_SIZE];
1349         struct l_wait_info lwi = { 0 };
1350         unsigned long delta;
1351         unsigned long flags;
1352         ENTRY;
1353
1354         kportal_daemonize("tgt-recov");
1355
1356         SIGNAL_MASK_LOCK(current, flags);
1357         sigfillset(&current->blocked);
1358         RECALC_SIGPENDING;
1359         SIGNAL_MASK_UNLOCK(current, flags);
1360
1361         CERROR("%s: started recovery thread pid %d\n", obd->obd_name, 
1362                current->pid);
1363         trd->trd_processing_task = current->pid;
1364
1365         obd->obd_recovering = 1;
1366         complete(&trd->trd_starting);
1367
1368         /* first of all, we have to know the first transno to replay */
1369         obd->obd_abort_recovery = 0;
1370         l_wait_event(obd->obd_next_transno_waitq,
1371                      check_for_clients(obd), &lwi);
1372         
1373         spin_lock_bh(&obd->obd_processing_task_lock);
1374         target_cancel_recovery_timer(obd);
1375         spin_unlock_bh(&obd->obd_processing_task_lock);
1376
1377         /* If some clients haven't connected in time, evict them */
1378         if (obd->obd_abort_recovery) {
1379                 int stale;
1380                 CDEBUG(D_ERROR, "some clients haven't connected in time (%d/%d), "
1381                        "evicting them ...\n", obd->obd_connected_clients,
1382                        obd->obd_max_recoverable_clients);
1383                 obd->obd_abort_recovery = 0;
1384                 stale = class_disconnect_stale_exports(obd, connect_done, 0);
1385                 atomic_sub(stale, &obd->obd_req_replay_clients);
1386                 atomic_sub(stale, &obd->obd_lock_replay_clients);
1387         }
1388
1389         /* next stage: replay requests */
1390         delta = jiffies;
1391         obd->obd_req_replaying = 1;
1392         CDEBUG(D_ERROR, "1: request replay stage - %d clients from t"LPU64"\n",
1393               atomic_read(&obd->obd_req_replay_clients),
1394               obd->obd_next_recovery_transno);
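         /* Replay queued requests strictly in transno order: each one is
          * handed to trd_recovery_handler(), then obd_next_recovery_transno
          * is advanced under obd_processing_task_lock so the next queued
          * request becomes eligible. */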
1395         while ((req = target_next_replay_req(obd))) {
1396                 LASSERT(trd->trd_processing_task == current->pid);
1397                 DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s", 
1398                           req->rq_reqmsg->transno, 
1399                           ptlrpc_peernid2str(&req->rq_peer, peer_str));
1400                 (void)trd->trd_recovery_handler(req);
1401                 obd->obd_replayed_requests++;
1402                 reset_recovery_timer(obd);
1403                 /* bug 1580: decide how to properly sync() in recovery */
1404                 //mds_fsync_super(mds->mds_sb);
1405                 ptlrpc_free_clone(req);
1406                 spin_lock_bh(&obd->obd_processing_task_lock);
1407                 obd->obd_next_recovery_transno++;
1408                 spin_unlock_bh(&obd->obd_processing_task_lock);
1409         }
1410
1411         spin_lock_bh(&obd->obd_processing_task_lock);
1412         target_cancel_recovery_timer(obd);
1413         spin_unlock_bh(&obd->obd_processing_task_lock);
1414
1415         /* If some clients haven't replayed requests in time, evict them */
1416         if (obd->obd_abort_recovery) {
1417                 int stale;
1418                 CDEBUG(D_ERROR, "req replay timed out, aborting ...\n");
1419                 obd->obd_abort_recovery = 0;
1420                 stale = class_disconnect_stale_exports(obd, req_replay_done, 0);
1421                 atomic_sub(stale, &obd->obd_lock_replay_clients);
1422                 abort_req_replay_queue(obd);
1423                 /* XXX for debugging tests 11 and 17 */
1424                 LBUG();
1425         }
1426
1427         /* The second stage: replay locks */
1428         CDEBUG(D_ERROR, "2: lock replay stage - %d clients\n",
1429               atomic_read(&obd->obd_lock_replay_clients));
1430         while ((req = target_next_replay_lock(obd))) {
1431                 LASSERT(trd->trd_processing_task == current->pid);
1432                 DEBUG_REQ(D_HA, req, "processing lock from %s: ", 
1433                           ptlrpc_peernid2str(&req->rq_peer, peer_str));
1434                 (void)trd->trd_recovery_handler(req);
1435                 reset_recovery_timer(obd);
1436                 ptlrpc_free_clone(req);
1437                 obd->obd_replayed_locks++;
1438         }
1439         
1440         spin_lock_bh(&obd->obd_processing_task_lock);
1441         target_cancel_recovery_timer(obd);
1442         spin_unlock_bh(&obd->obd_processing_task_lock);
1443
1444         /* If some clients haven't replayed their locks in time, evict them */
1445         if (obd->obd_abort_recovery) {
1446                 int stale;
1447                 CERROR("lock replay timed out, aborting ...\n");
1448                 obd->obd_abort_recovery = 0;
1449                 stale = class_disconnect_stale_exports(obd, lock_replay_done, 0);
1450                 abort_lock_replay_queue(obd);
1451         }
1452
1453         /* Drop the recovering flag so that all new requests are
1454          * forwarded to the regular handler (e.g. mds_handle()) from now on */
1455         spin_lock_bh(&obd->obd_processing_task_lock);
1456         obd->obd_recovering = 0;
1457         spin_unlock_bh(&obd->obd_processing_task_lock);
1458
1459         /* The third stage: reply to final pings */
1460         CDEBUG(D_ERROR, "3: final stage - process recovery completion pings\n");
1461         while ((req = target_next_final_ping(obd))) {
1462                 LASSERT(trd->trd_processing_task == current->pid);
1463                 DEBUG_REQ(D_HA, req, "processing final ping from %s: ", 
1464                           ptlrpc_peernid2str(&req->rq_peer, peer_str));
1465                 (void)trd->trd_recovery_handler(req);
1466                 ptlrpc_free_clone(req);
1467         }
1468        
1469         delta = (jiffies - delta) / HZ;
1470         CDEBUG(D_ERROR,"4: recovery completed in %lus - %d/%d reqs/locks\n",
1471               delta, obd->obd_replayed_requests, obd->obd_replayed_locks);
1472         if (delta > obd_timeout * 2) {
1473                 CWARN("recovery took too long - dumping debug logs\n");
1474                 portals_debug_dumplog();
1475         }
1476         target_finish_recovery(obd);
1477
1478         trd->trd_processing_task = 0;
1479         complete(&trd->trd_finishing);
1480         return 0;
1481 }
1482
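     /* Spawn the recovery thread and wait until it has signalled
      * trd_starting (at which point obd_recovering is set).  Returns
      * -ECHILD if the kernel thread could not be created. */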
1483 int target_start_recovery_thread(struct obd_device *obd, svc_handler_t handler)
1484 {
1485         int rc = 0;
1486         struct target_recovery_data *trd = &obd->obd_recovery_data;
1487
1488         memset(trd, 0, sizeof(*trd));
1489         init_completion(&trd->trd_starting);
1490         init_completion(&trd->trd_finishing);
1491         trd->trd_recovery_handler = handler;
1492
1493         if (kernel_thread(target_recovery_thread, obd, 0) > 0) {
1494                 wait_for_completion(&trd->trd_starting);
1495                 LASSERT(obd->obd_recovering != 0);
1496         } else
1497                 rc = -ECHILD;
1498
1499         return rc;
1500 }
1501
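     /* Abort an in-progress recovery: set obd_abort_recovery, wake the
      * recovery thread, and wait until it signals trd_finishing. */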
1502 void target_stop_recovery_thread(struct obd_device *obd)
1503 {
1504         spin_lock_bh(&obd->obd_processing_task_lock);
1505         if (obd->obd_recovery_data.trd_processing_task > 0) {
1506                 struct target_recovery_data *trd = &obd->obd_recovery_data;
1507                 CERROR("%s: aborting recovery\n", obd->obd_name);
1508                 obd->obd_abort_recovery = 1;
1509                 wake_up(&obd->obd_next_transno_waitq);
1510                 spin_unlock_bh(&obd->obd_processing_task_lock);
1511                 wait_for_completion(&trd->trd_finishing);
1512         } else {
1513                 spin_unlock_bh(&obd->obd_processing_task_lock);
1514         }
1515 }
1516 #endif
1517
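     /* Look at the recovery flags the client set on its request and
      * advance that export's recovery state accordingly, waking the
      * recovery thread when a whole phase has completed. */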
1518 int target_process_req_flags(struct obd_device *obd, struct ptlrpc_request *req)
1519 {
1520         struct obd_export *exp = req->rq_export;
1521         LASSERT(exp != NULL);
1522         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
1523                 /* the client declares it is ready to replay locks */
1524                 spin_lock_bh(&obd->obd_processing_task_lock);
1525                 if (exp->exp_req_replay_needed) {
1526                         LASSERT(atomic_read(&obd->obd_req_replay_clients) > 0);
1527                         exp->exp_req_replay_needed = 0;
1528                         atomic_dec(&obd->obd_req_replay_clients);
1529                         obd->obd_recoverable_clients--;
1530                         if (atomic_read(&obd->obd_req_replay_clients) == 0)
1531                                 CDEBUG(D_HA, "all clients have replayed reqs\n");
1532                         wake_up(&obd->obd_next_transno_waitq);
1533                 }
1534                 spin_unlock_bh(&obd->obd_processing_task_lock);
1535         }
1536         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
1537                 /* the client declares it is ready to complete recovery,
1538                  * so we put the request on the final queue */
1539                 spin_lock_bh(&obd->obd_processing_task_lock);
1540                 if (exp->exp_lock_replay_needed) {
1541                         LASSERT(atomic_read(&obd->obd_lock_replay_clients) > 0);
1542                         exp->exp_lock_replay_needed = 0;
1543                         atomic_dec(&obd->obd_lock_replay_clients);
1544                         if (atomic_read(&obd->obd_lock_replay_clients) == 0)
1545                                 CDEBUG(D_HA, "all clients have replayed locks\n");
1546                         wake_up(&obd->obd_next_transno_waitq);
1547                 }
1548                 spin_unlock_bh(&obd->obd_processing_task_lock);
1549         }
1550
1551         return 0;
1552 }
1553
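     /* Decide what to do with a request that arrives during recovery:
      * clone it onto the request-replay, lock-replay or final-ping queue,
      * or let the caller handle it immediately.  Returns 0 if the request
      * was queued, 1 if it should be processed now, and a negative errno
      * on failure. */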
1554 int target_queue_recovery_request(struct ptlrpc_request *req,
1555                                   struct obd_device *obd)
1556 {
1557         struct list_head *tmp;
1558         int inserted = 0;
1559         __u64 transno = req->rq_reqmsg->transno;
1560
1561         if (obd->obd_recovery_data.trd_processing_task == current->pid) {
1562                 /* Processing the queue right now, don't re-add. */
1563                 return 1;
1564         }
1565
1566         target_process_req_flags(obd, req);
1567
1568         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
1569                 /* the client declares it is ready to complete recovery,
1570                  * so we put the request on the final queue */
1571                 req = ptlrpc_clone_req(req);
1572                 if (req == NULL)
1573                         return -ENOMEM;
1574                 DEBUG_REQ(D_HA, req, "queue final req");
1575                 spin_lock_bh(&obd->obd_processing_task_lock);
1576                 list_add_tail(&req->rq_list, &obd->obd_final_req_queue);
1577                 spin_unlock_bh(&obd->obd_processing_task_lock);
1578                 return 0;
1579         }
1580         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
1581                 /* the client declares it is ready to replay locks */
1582                 req = ptlrpc_clone_req(req);
1583                 if (req == NULL)
1584                         return -ENOMEM;
1585                 DEBUG_REQ(D_HA, req, "queue lock replay req");
1586                 spin_lock_bh(&obd->obd_processing_task_lock);
1587                 list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
1588                 spin_unlock_bh(&obd->obd_processing_task_lock);
1589                 wake_up(&obd->obd_next_transno_waitq);
1590                 return 0;
1591         }
1592
1593
1594         /* CAVEAT EMPTOR: The incoming request message has been swabbed
1595          * (i.e. buflens etc are in my own byte order), but type-dependent
1596          * buffers (eg mds_body, ost_body etc) have NOT been swabbed. */
1597
1598         if (!transno) {
1599                 INIT_LIST_HEAD(&req->rq_list);
1600                 DEBUG_REQ(D_HA, req, "not queueing");
1601                 return 1;
1602         }
1603
1604
1605         /* If we're processing the queue, we don't want to queue this
1606          * message.
1607          *
1608          * Also, if this request has a transno less than the one we're waiting
1609          * for, we should process it now.  It could (and currently always will)
1610          * be an open request for a descriptor that was opened some time ago.
1611          *
1612          * Also, a resent, replayed request that has already been
1613          * handled will pass through here and be processed immediately.
1614          */
1615         spin_lock_bh(&obd->obd_processing_task_lock);
1616         if (transno < obd->obd_next_recovery_transno && obd->obd_req_replaying) {
1617                 /* Processing the queue right now, don't re-add. */
1618                 LASSERT(list_empty(&req->rq_list));
1619                 spin_unlock_bh(&obd->obd_processing_task_lock);
1620                 return 1;
1621         }
1622         spin_unlock_bh(&obd->obd_processing_task_lock);
1623
1624         /* A resent, replayed request that is still on the queue; just drop it.
1625            The queued request will handle this. */
1626         if ((lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY))
1627             == (MSG_RESENT | MSG_REPLAY)) {
1628                 DEBUG_REQ(D_ERROR, req, "dropping resent queued req");
1629                 return 0;
1630         }
1631
1632         req = ptlrpc_clone_req(req);
1633         if (req == NULL)
1634                 return -ENOMEM;
1635
1636         spin_lock_bh(&obd->obd_processing_task_lock);
1637
1638         /* XXX O(n^2) */
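             /* Keep obd_req_replay_queue sorted by transno: insert the
              * clone in front of the first queued request with a larger
              * transno, or at the tail if there is none. */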
1639         list_for_each(tmp, &obd->obd_req_replay_queue) {
1640                 struct ptlrpc_request *reqiter =
1641                         list_entry(tmp, struct ptlrpc_request, rq_list);
1642
1643                 if (reqiter->rq_reqmsg->transno > transno) {
1644                         list_add_tail(&req->rq_list, &reqiter->rq_list);
1645                         inserted = 1;
1646                         break;
1647                 }
1648         }
1649
1650         if (!inserted)
1651                 list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
1652
1653         obd->obd_requests_queued_for_recovery++;
1654         wake_up(&obd->obd_next_transno_waitq);
1655         spin_unlock_bh(&obd->obd_processing_task_lock);
1656         return 0;
1657 }
1658
1659 struct obd_device * target_req2obd(struct ptlrpc_request *req)
1660 {
1661         return req->rq_export->exp_obd;
1662 }
1663
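     /* Send the reply (or an error) for @req.  When obd_fail_loc matches
      * @fail_id the reply is deliberately dropped (fault injection);
      * otherwise errors are sent via ptlrpc_error() and normal replies
      * via ptlrpc_send_reply(). */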
1664 int
1665 target_send_reply_msg (struct ptlrpc_request *req, int rc, int fail_id)
1666 {
1667         if (OBD_FAIL_CHECK(fail_id | OBD_FAIL_ONCE)) {
1668                 obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
1669                 DEBUG_REQ(D_ERROR, req, "dropping reply");
1670                 /* NB this does _not_ send with ACK disabled, to simulate
1671                  * sending OK, but timing out for the ACK */
1672                 if (req->rq_reply_state != NULL) {
1673                         if (!req->rq_reply_state->rs_difficult) {
1674                                 lustre_free_reply_state (req->rq_reply_state);
1675                                 req->rq_reply_state = NULL;
1676                         } else {
1677                                 struct ptlrpc_service *svc =
1678                                         req->rq_rqbd->rqbd_srv_ni->sni_service;
1679                                 atomic_inc(&svc->srv_outstanding_replies);
1680                         }
1681                 }
1682                 return (-ECOMM);
1683         }
1684
1685         if (rc) {
1686                 req->rq_status = rc;
1687                 return (ptlrpc_error(req));
1688         } else {
1689                 DEBUG_REQ(D_NET, req, "sending reply");
1690         }
1691         
1692         return (ptlrpc_send_reply(req, 1));
1693 }
1694
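     /* Send a reply, treating "difficult" replies specially: a difficult
      * reply must be kept around (e.g. until its transno commits), so it
      * is linked onto obd_uncommitted_replies and exp_outstanding_replies
      * and is finalised later by the service's reply handling rather than
      * freed here. */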
1695 void 
1696 target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
1697 {
1698         int                        netrc;
1699         unsigned long              flags;
1700         struct ptlrpc_reply_state *rs;
1701         struct obd_device         *obd;
1702         struct obd_export         *exp;
1703         struct ptlrpc_srv_ni      *sni;
1704         struct ptlrpc_service     *svc;
1705
1706         sni = req->rq_rqbd->rqbd_srv_ni;
1707         svc = sni->sni_service;
1708         
1709         rs = req->rq_reply_state;
1710         if (rs == NULL || !rs->rs_difficult) {
1711                 /* The easy case; no notifiers and reply_out_callback()
1712                  * cleans up (i.e. we can't look inside rs after a
1713                  * successful send) */
1714                 netrc = target_send_reply_msg (req, rc, fail_id);
1715
1716                 LASSERT (netrc == 0 || req->rq_reply_state == NULL);
1717                 return;
1718         }
1719
1720         /* must be an export if locks saved */
1721         LASSERT (req->rq_export != NULL);
1722         /* req/reply consistent */
1723         LASSERT (rs->rs_srv_ni == sni);
1724
1725         /* "fresh" reply */
1726         LASSERT (!rs->rs_scheduled);
1727         LASSERT (!rs->rs_scheduled_ever);
1728         LASSERT (!rs->rs_handled);
1729         LASSERT (!rs->rs_on_net);
1730         LASSERT (rs->rs_export == NULL);
1731         LASSERT (list_empty(&rs->rs_obd_list));
1732         LASSERT (list_empty(&rs->rs_exp_list));
1733
1734         exp = class_export_get (req->rq_export);
1735         obd = exp->exp_obd;
1736
1737         /* disable reply scheduling onto srv_reply_queue while I'm setting up */
1738         rs->rs_scheduled = 1;
1739         rs->rs_on_net    = 1;
1740         rs->rs_xid       = req->rq_xid;
1741         rs->rs_transno   = req->rq_transno;
1742         rs->rs_export    = exp;
1743         
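         /* Note: interrupts stay disabled across both critical sections
          * below; the irqsave taken with obd_uncommitted_replies_lock is
          * only restored when exp_lock is released. */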
1744         spin_lock_irqsave (&obd->obd_uncommitted_replies_lock, flags);
1745
1746         if (rs->rs_transno > obd->obd_last_committed) {
1747                 /* not committed already */ 
1748                 list_add_tail (&rs->rs_obd_list, 
1749                                &obd->obd_uncommitted_replies);
1750         }
1751
1752         spin_unlock (&obd->obd_uncommitted_replies_lock);
1753         spin_lock (&exp->exp_lock);
1754
1755         list_add_tail (&rs->rs_exp_list, &exp->exp_outstanding_replies);
1756
1757         spin_unlock_irqrestore (&exp->exp_lock, flags);
1758
1759         netrc = target_send_reply_msg (req, rc, fail_id);
1760
1761         spin_lock_irqsave (&svc->srv_lock, flags);
1762
1763         svc->srv_n_difficult_replies++;
1764
1765         if (netrc != 0) /* error sending: reply is off the net */
1766                 rs->rs_on_net = 0;
1767
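         /* If the reply is already off the net, or a notifier has already
          * taken it off the export/obd lists, it is ready for final
          * processing: queue it for a service thread.  Otherwise park it
          * on sni_active_replies and let a later notifier schedule it. */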
1768         if (!rs->rs_on_net ||                   /* some notifier */
1769             list_empty(&rs->rs_exp_list) ||     /* completed already */
1770             list_empty(&rs->rs_obd_list)) {
1771                 list_add_tail (&rs->rs_list, &svc->srv_reply_queue);
1772                 wake_up (&svc->srv_waitq);
1773         } else {
1774                 list_add (&rs->rs_list, &sni->sni_active_replies);
1775                 rs->rs_scheduled = 0;           /* allow notifier to schedule */
1776         }
1777
1778         spin_unlock_irqrestore (&svc->srv_lock, flags);
1779 }
1780
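     /* PING needs no reply body: just pack an empty reply. */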
1781 int target_handle_ping(struct ptlrpc_request *req)
1782 {
1783         return lustre_pack_reply(req, 0, NULL, NULL);
1784 }