Whamcloud - gitweb
land b1_5 onto HEAD
[fs/lustre-release.git] / lustre / ptlrpc / recov_thread.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2003 Cluster File Systems, Inc.
5  *   Author: Andreas Dilger <adilger@clusterfs.com>
6  *
7  *   This file is part of the Lustre file system, http://www.lustre.org
8  *   Lustre is a trademark of Cluster File Systems, Inc.
9  *
10  *   You may have signed or agreed to another license before downloading
11  *   this software.  If so, you are bound by the terms and conditions
12  *   of that agreement, and the following does not apply to you.  See the
13  *   LICENSE file included with this distribution for more information.
14  *
15  *   If you did not agree to a different license, then this copy of Lustre
16  *   is open source software; you can redistribute it and/or modify it
17  *   under the terms of version 2 of the GNU General Public License as
18  *   published by the Free Software Foundation.
19  *
20  *   In either case, Lustre is distributed in the hope that it will be
21  *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
22  *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
23  *   license text for more details.
24  *
25  * OST<->MDS recovery logging thread.
26  *
27  * Invariants in implementation:
28  * - we do not share logs among different OST<->MDS connections, so that
29  *   if an OST or MDS fails it need only look at log(s) relevant to itself
30  */
31
32 #define DEBUG_SUBSYSTEM S_LOG
33
34 #ifndef EXPORT_SYMTAB
35 # define EXPORT_SYMTAB
36 #endif
37
38 #ifdef __KERNEL__
39 # include <libcfs/libcfs.h>
40 #else
41 # include <libcfs/list.h>
42 # include <liblustre.h>
43 #endif
44
45 #include <libcfs/kp30.h>
46 #include <obd_class.h>
47 #include <lustre_commit_confd.h>
48 #include <obd_support.h>
49 #include <obd_class.h>
50 #include <lustre_net.h>
51 #include <lnet/types.h>
52 #include <libcfs/list.h>
53 #include <lustre_log.h>
54 #include "ptlrpc_internal.h"
55
56 #ifdef __KERNEL__
57
/* Single, file-wide commit master: owns the free/pending/resend llcd
 * lists and the commit-thread bookkeeping.  All helpers below operate
 * on this instance through the 'lcm' pointer. */
static struct llog_commit_master lustre_lcm;
static struct llog_commit_master *lcm = &lustre_lcm;
60
61 /* Allocate new commit structs in case we do not have enough.
62  * Make the llcd size small enough that it fits into a single page when we
63  * are sending/receiving it. */
64 static int llcd_alloc(void)
65 {
66         struct llog_canceld_ctxt *llcd;
67         int llcd_size;
68
69         /* payload of lustre_msg V2 is bigger */
70         llcd_size = 4096 - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
71         OBD_ALLOC(llcd,
72                   llcd_size + offsetof(struct llog_canceld_ctxt, llcd_cookies));
73         if (llcd == NULL)
74                 return -ENOMEM;
75
76         llcd->llcd_size = llcd_size;
77         llcd->llcd_lcm = lcm;
78
79         spin_lock(&lcm->lcm_llcd_lock);
80         list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
81         atomic_inc(&lcm->lcm_llcd_numfree);
82         spin_unlock(&lcm->lcm_llcd_lock);
83
84         return 0;
85 }
86
87 /* Get a free cookie struct from the list */
88 struct llog_canceld_ctxt *llcd_grab(void)
89 {
90         struct llog_canceld_ctxt *llcd;
91
92 repeat:
93         spin_lock(&lcm->lcm_llcd_lock);
94         if (list_empty(&lcm->lcm_llcd_free)) {
95                 spin_unlock(&lcm->lcm_llcd_lock);
96                 if (llcd_alloc() < 0) {
97                         CERROR("unable to allocate log commit data!\n");
98                         return NULL;
99                 }
100                 /* check new llcd wasn't grabbed while lock dropped, b=7407 */
101                 goto repeat;
102         }
103
104         llcd = list_entry(lcm->lcm_llcd_free.next, typeof(*llcd), llcd_list);
105         list_del(&llcd->llcd_list);
106         atomic_dec(&lcm->lcm_llcd_numfree);
107         spin_unlock(&lcm->lcm_llcd_lock);
108
109         llcd->llcd_cookiebytes = 0;
110
111         return llcd;
112 }
113 EXPORT_SYMBOL(llcd_grab);
114
115 static void llcd_put(struct llog_canceld_ctxt *llcd)
116 {
117         if (atomic_read(&lcm->lcm_llcd_numfree) >= lcm->lcm_llcd_maxfree) {
118                 int llcd_size = llcd->llcd_size +
119                          offsetof(struct llog_canceld_ctxt, llcd_cookies);
120                 OBD_FREE(llcd, llcd_size);
121         } else {
122                 spin_lock(&lcm->lcm_llcd_lock);
123                 list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
124                 atomic_inc(&lcm->lcm_llcd_numfree);
125                 spin_unlock(&lcm->lcm_llcd_lock);
126         }
127 }
128
129 /* Send some cookies to the appropriate target */
130 void llcd_send(struct llog_canceld_ctxt *llcd)
131 {
132         spin_lock(&llcd->llcd_lcm->lcm_llcd_lock);
133         list_add_tail(&llcd->llcd_list, &llcd->llcd_lcm->lcm_llcd_pending);
134         spin_unlock(&llcd->llcd_lcm->lcm_llcd_lock);
135
136         cfs_waitq_signal_nr(&llcd->llcd_lcm->lcm_waitq, 1);
137 }
138 EXPORT_SYMBOL(llcd_send);
139
/* deleted objects have a commit callback that cancels the MDS
 * log record for the deletion.  The commit callback calls this
 * function.
 *
 * Appends the cookie to the context's current llcd (grabbing a fresh
 * one on demand) and, once the llcd cannot hold another cookie or
 * OBD_LLOG_FL_SENDNOW is set, detaches it and hands it to llcd_send()
 * for delivery by a commit thread.  Returns 0 on success, -ENOMEM if
 * no llcd could be grabbed (the cookie is dropped in that case).
 * All llcd manipulation is serialized by ctxt->loc_sem. */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        struct llog_canceld_ctxt *llcd;
        int rc = 0;
        ENTRY;

        LASSERT(ctxt);

        mutex_down(&ctxt->loc_sem);
        if (ctxt->loc_imp == NULL) {
                /* import already torn down; nothing to cancel against */
                CDEBUG(D_HA, "no import for ctxt %p\n", ctxt);
                GOTO(out, rc = 0);
        }

        llcd = ctxt->loc_llcd;

        if (count > 0 && cookies != NULL) {
                if (llcd == NULL) {
                        llcd = llcd_grab();
                        if (llcd == NULL) {
                                CERROR("couldn't get an llcd - dropped "LPX64
                                       ":%x+%u\n",
                                       cookies->lgc_lgl.lgl_oid,
                                       cookies->lgc_lgl.lgl_ogen,
                                       cookies->lgc_index);
                                GOTO(out, rc = -ENOMEM);
                        }
                        llcd->llcd_ctxt = ctxt;
                        ctxt->loc_llcd = llcd;
                }

                /* NOTE(review): only one cookie is copied regardless of
                 * count -- presumably callers pass them one at a time;
                 * confirm against call sites. */
                memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
                       cookies, sizeof(*cookies));
                llcd->llcd_cookiebytes += sizeof(*cookies);
        } else {
                /* no cookies supplied: only flush an existing llcd, and
                 * only when an immediate send was requested */
                if (llcd == NULL || !(flags & OBD_LLOG_FL_SENDNOW))
                        GOTO(out, rc);
        }

        /* ship the llcd when it cannot hold one more cookie, or when the
         * caller asked for an immediate send */
        if ((llcd->llcd_size - llcd->llcd_cookiebytes) < sizeof(*cookies) ||
            (flags & OBD_LLOG_FL_SENDNOW)) {
                CDEBUG(D_HA, "send llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                ctxt->loc_llcd = NULL;
                llcd_send(llcd);
        }
out:
        mutex_up(&ctxt->loc_sem);
        return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
196
/* Synchronize the replicator log state with an export.
 *
 * If @exp is the reverse import's export (i.e. the peer disconnected),
 * discard the context's pending llcd and detach the import.  Otherwise
 * force any accumulated cancel cookies out immediately via
 * llog_cancel(..., OBD_LLOG_FL_SENDNOW).  Returns the llog_cancel rc,
 * or 0 on the disconnect path. */
int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
{
        int rc = 0;
        ENTRY;

        if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                /* NOTE(review): loc_llcd is read for the debug message
                 * before loc_sem is taken -- value may be stale */
                CDEBUG(D_HA, "reverse import disconnected, put llcd %p:%p\n",
                       ctxt->loc_llcd, ctxt);
                mutex_down(&ctxt->loc_sem);
                if (ctxt->loc_llcd != NULL) {
                        llcd_put(ctxt->loc_llcd);
                        ctxt->loc_llcd = NULL;
                }
                ctxt->loc_imp = NULL;
                mutex_up(&ctxt->loc_sem);
        } else {
                rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
        }

        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_sync);
219
/* Commit daemon thread body.
 *
 * Loops forever: waits for llcds on the master's pending list, pulls
 * off all llcds destined for the same import (so one slow/failed OST
 * does not block cancels for the others), and delivers each batch via
 * OBD_LOG_CANCEL RPCs.  Failed deliveries are parked on the resend
 * list and retried; on LLOG_LCM_FL_EXIT the thread drains pending
 * work (or drops everything if EXIT_FORCE) and exits.  Returns 0, or
 * -ENOMEM if its per-daemon descriptor cannot be allocated. */
static int log_commit_thread(void *arg)
{
        struct llog_commit_master *lcm = arg;
        struct llog_commit_daemon *lcd;
        struct llog_canceld_ctxt *llcd, *n;
        ENTRY;

        OBD_ALLOC(lcd, sizeof(*lcd));
        if (lcd == NULL)
                RETURN(-ENOMEM);

        /* pick a unique thread name and count ourselves in */
        spin_lock(&lcm->lcm_thread_lock);
        THREAD_NAME(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX - 1,
                    "ll_log_comt_%02d", atomic_read(&lcm->lcm_thread_total));
        atomic_inc(&lcm->lcm_thread_total);
        spin_unlock(&lcm->lcm_thread_lock);

        ptlrpc_daemonize(cfs_curproc_comm()); /* thread never needs to do IO */

        CFS_INIT_LIST_HEAD(&lcd->lcd_lcm_list);
        CFS_INIT_LIST_HEAD(&lcd->lcd_llcd_list);
        lcd->lcd_lcm = lcm;

        CDEBUG(D_HA, "%s started\n", cfs_curproc_comm());
        do {
                struct ptlrpc_request *request;
                struct obd_import *import = NULL;
                struct list_head *sending_list;
                int rc = 0;

                /* If we do not have enough pages available, allocate some */
                while (atomic_read(&lcm->lcm_llcd_numfree) <
                       lcm->lcm_llcd_minfree) {
                        if (llcd_alloc() < 0)
                                break;
                }

                /* park on the idle list while waiting for work */
                spin_lock(&lcm->lcm_thread_lock);
                atomic_inc(&lcm->lcm_thread_numidle);
                list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_idle);
                spin_unlock(&lcm->lcm_thread_lock);

                wait_event_interruptible(lcm->lcm_waitq,
                                         !list_empty(&lcm->lcm_llcd_pending) ||
                                         lcm->lcm_flags & LLOG_LCM_FL_EXIT);

                /* If we are the last available thread, start a new one in case
                 * we get blocked on an RPC (nobody else will start a new one)*/
                spin_lock(&lcm->lcm_thread_lock);
                atomic_dec(&lcm->lcm_thread_numidle);
                list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_busy);
                spin_unlock(&lcm->lcm_thread_lock);

                sending_list = &lcm->lcm_llcd_pending;
        resend:
                import = NULL;
                if (lcm->lcm_flags & LLOG_LCM_FL_EXIT) {
                        /* shutting down: stop caching llcds and stop
                         * spawning threads */
                        lcm->lcm_llcd_maxfree = 0;
                        lcm->lcm_llcd_minfree = 0;
                        lcm->lcm_thread_max = 0;

                        if (list_empty(&lcm->lcm_llcd_pending) ||
                            lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE)
                                break;
                }

                if (atomic_read(&lcm->lcm_thread_numidle) <= 1 &&
                    atomic_read(&lcm->lcm_thread_total) < lcm->lcm_thread_max) {
                        rc = llog_start_commit_thread();
                        if (rc < 0)
                                CERROR("error starting thread: rc %d\n", rc);
                }

                /* Move all of the pending cancels from the same OST off of
                 * the list, so we don't get multiple threads blocked and/or
                 * doing upcalls on the same OST in case of failure. */
                spin_lock(&lcm->lcm_llcd_lock);
                if (!list_empty(sending_list)) {
                        list_move_tail(sending_list->next,
                                       &lcd->lcd_llcd_list);
                        llcd = list_entry(lcd->lcd_llcd_list.next,
                                          typeof(*llcd), llcd_list);
                        LASSERT(llcd->llcd_lcm == lcm);
                        /* batch key: every llcd for this same import */
                        import = llcd->llcd_ctxt->loc_imp;
                }
                list_for_each_entry_safe(llcd, n, sending_list, llcd_list) {
                        LASSERT(llcd->llcd_lcm == lcm);
                        if (import == llcd->llcd_ctxt->loc_imp)
                                list_move_tail(&llcd->llcd_list,
                                               &lcd->lcd_llcd_list);
                }
                /* also pick up resend entries for the same import, unless
                 * we are already draining the resend list itself */
                if (sending_list != &lcm->lcm_llcd_resend) {
                        list_for_each_entry_safe(llcd, n, &lcm->lcm_llcd_resend,
                                                 llcd_list) {
                                LASSERT(llcd->llcd_lcm == lcm);
                                if (import == llcd->llcd_ctxt->loc_imp)
                                        list_move_tail(&llcd->llcd_list,
                                                       &lcd->lcd_llcd_list);
                        }
                }
                spin_unlock(&lcm->lcm_llcd_lock);

                /* We are the only one manipulating our local list - no lock */
                list_for_each_entry_safe(llcd,n, &lcd->lcd_llcd_list,llcd_list){
                        int size[2] = { sizeof(struct ptlrpc_body),
                                        llcd->llcd_cookiebytes };
                        char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };

                        list_del(&llcd->llcd_list);
                        if (llcd->llcd_cookiebytes == 0) {
                                CDEBUG(D_HA, "put empty llcd %p:%p\n",
                                       llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }

                        /* drop llcds whose context lost its import */
                        mutex_down(&llcd->llcd_ctxt->loc_sem);
                        if (llcd->llcd_ctxt->loc_imp == NULL) {
                                mutex_up(&llcd->llcd_ctxt->loc_sem);
                                CWARN("import will be destroyed, put "
                                      "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }
                        mutex_up(&llcd->llcd_ctxt->loc_sem);

                        /* defensive checks against a freed/poisoned import */
                        if (!import || (import == LP_POISON) ||
                            (import->imp_client == LP_POISON)) {
                                CERROR("No import %p (llcd=%p, ctxt=%p)\n",
                                       import, llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }

                        request = ptlrpc_prep_req(import, LUSTRE_LOG_VERSION,
                                                  OBD_LOG_CANCEL, 2, size,bufs);
                        if (request == NULL) {
                                rc = -ENOMEM;
                                CERROR("error preparing commit: rc %d\n", rc);

                                /* park the rest of the batch for a retry */
                                spin_lock(&lcm->lcm_llcd_lock);
                                list_splice(&lcd->lcd_llcd_list,
                                            &lcm->lcm_llcd_resend);
                                CFS_INIT_LIST_HEAD(&lcd->lcd_llcd_list);
                                spin_unlock(&lcm->lcm_llcd_lock);
                                break;
                        }

                        /* XXX FIXME bug 249, 5515 */
                        request->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                        request->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                        ptlrpc_req_set_repsize(request, 1, NULL);
                        /* re-check the import right before sending: it may
                         * have been torn down while we prepared the request */
                        mutex_down(&llcd->llcd_ctxt->loc_sem);
                        if (llcd->llcd_ctxt->loc_imp == NULL) {
                                mutex_up(&llcd->llcd_ctxt->loc_sem);
                                CWARN("import will be destroyed, put "
                                      "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                ptlrpc_req_finished(request);
                                continue;
                        }
                        mutex_up(&llcd->llcd_ctxt->loc_sem);
                        rc = ptlrpc_queue_wait(request);
                        ptlrpc_req_finished(request);

                        /* If the RPC failed, we put this and the remaining
                         * messages onto the resend list for another time. */
                        if (rc == 0) {
                                llcd_put(llcd);
                                continue;
                        }

                        /* NOTE(review): on RPC failure the llcd is dropped,
                         * not resent, despite the comment above */
                        CERROR("commit %p:%p drop %d cookies: rc %d\n",
                               llcd, llcd->llcd_ctxt,
                               (int)(llcd->llcd_cookiebytes /
                                     sizeof(*llcd->llcd_cookies)), rc);
                        llcd_put(llcd);
                }

                if (rc == 0) {
                        /* batch went cleanly; drain any parked resends too */
                        sending_list = &lcm->lcm_llcd_resend;
                        if (!list_empty(sending_list))
                                goto resend;
                }
        } while(1);

        /* If we are force exiting, just drop all of the cookies. */
        if (lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE) {
                spin_lock(&lcm->lcm_llcd_lock);
                list_splice(&lcm->lcm_llcd_pending, &lcd->lcd_llcd_list);
                list_splice(&lcm->lcm_llcd_resend, &lcd->lcd_llcd_list);
                list_splice(&lcm->lcm_llcd_free, &lcd->lcd_llcd_list);
                spin_unlock(&lcm->lcm_llcd_lock);

                list_for_each_entry_safe(llcd, n, &lcd->lcd_llcd_list,llcd_list)
                        llcd_put(llcd);
        }

        spin_lock(&lcm->lcm_thread_lock);
        list_del(&lcd->lcd_lcm_list);
        spin_unlock(&lcm->lcm_thread_lock);
        OBD_FREE(lcd, sizeof(*lcd));

        CDEBUG(D_HA, "%s exiting\n", cfs_curproc_comm());

        /* count ourselves out and wake llog_cleanup_commit_master() */
        spin_lock(&lcm->lcm_thread_lock);
        atomic_dec(&lcm->lcm_thread_total);
        spin_unlock(&lcm->lcm_thread_lock);
        cfs_waitq_signal(&lcm->lcm_waitq);

        return 0;
}
433
434 int llog_start_commit_thread(void)
435 {
436         int rc;
437         ENTRY;
438
439         if (atomic_read(&lcm->lcm_thread_total) >= lcm->lcm_thread_max)
440                 RETURN(0);
441
442         rc = cfs_kernel_thread(log_commit_thread, lcm, CLONE_VM | CLONE_FILES);
443         if (rc < 0) {
444                 CERROR("error starting thread #%d: %d\n",
445                        atomic_read(&lcm->lcm_thread_total), rc);
446                 RETURN(rc);
447         }
448
449         RETURN(0);
450 }
451 EXPORT_SYMBOL(llog_start_commit_thread);
452
/* Argument hand-off area shared between llog_recovery_generic() and
 * the log_process_thread it spawns.  llpa_sem serializes users: the
 * spawner takes it, fills the fields, and the spawned thread releases
 * it after copying them out. */
static struct llog_process_args {
        struct semaphore         llpa_sem;   /* serializes use of this struct */
        struct llog_ctxt        *llpa_ctxt;  /* context whose log to process */
        void                    *llpa_cb;    /* llog_cb_t callback, may be NULL */
        void                    *llpa_arg;   /* struct llog_logid * to replay */
} llpa;
459
/* One-time initialization of the global commit master: lists, locks,
 * counters, and the llpa hand-off semaphore.  Always returns 0.
 * NOTE(review): lcm_flags, lcm_llcd_maxfree and lcm_thread_total are
 * not set here -- presumably they rely on lustre_lcm's static
 * zero-initialization; confirm if this is ever re-run after use. */
int llog_init_commit_master(void)
{
        CFS_INIT_LIST_HEAD(&lcm->lcm_thread_busy);
        CFS_INIT_LIST_HEAD(&lcm->lcm_thread_idle);
        spin_lock_init(&lcm->lcm_thread_lock);
        atomic_set(&lcm->lcm_thread_numidle, 0);
        cfs_waitq_init(&lcm->lcm_waitq);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_pending);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_resend);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_free);
        spin_lock_init(&lcm->lcm_llcd_lock);
        atomic_set(&lcm->lcm_llcd_numfree, 0);
        lcm->lcm_llcd_minfree = 0;
        lcm->lcm_thread_max = 5;
        /* semaphore protecting the shared llog_process_args above */
        sema_init(&llpa.llpa_sem, 1);
        return 0;
}
478
/* Ask all commit threads to exit -- dropping all queued cookies if
 * @force -- then wait for the thread count to reach zero.  Always
 * returns 0.
 * NOTE(review): the wait is interruptible, so a pending signal could
 * return control while threads are still running -- confirm callers
 * tolerate that. */
int llog_cleanup_commit_master(int force)
{
        lcm->lcm_flags |= LLOG_LCM_FL_EXIT;
        if (force)
                lcm->lcm_flags |= LLOG_LCM_FL_EXIT_FORCE;
        cfs_waitq_signal(&lcm->lcm_waitq);

        wait_event_interruptible(lcm->lcm_waitq,
                                 atomic_read(&lcm->lcm_thread_total) == 0);
        return 0;
}
490
/* One-shot recovery thread: replays the catalog llog identified by
 * llpa_arg through the callback in llpa_cb, then forces any pending
 * cancel cookies out via llog_sync().
 *
 * The llpa fields are copied out before llpa_sem (taken by the
 * spawner) is released, making the shared struct reusable.
 * NOTE(review): the final llog_cat_put() return overwrites rc, so
 * processing errors are reported only via console messages. */
static int log_process_thread(void *args)
{
        struct llog_process_args *data = args;
        struct llog_ctxt *ctxt = data->llpa_ctxt;
        void   *cb = data->llpa_cb;
        struct llog_logid logid = *(struct llog_logid *)(data->llpa_arg);
        struct llog_handle *llh = NULL;
        int rc;
        ENTRY;

        /* everything copied out of llpa: let the next user in */
        mutex_up(&data->llpa_sem);
        ptlrpc_daemonize("llog_process");     /* thread does IO to log files */

        rc = llog_create(ctxt, &llh, &logid, NULL);
        if (rc) {
                CERROR("llog_create failed %d\n", rc);
                RETURN(rc);
        }
        rc = llog_init_handle(llh, LLOG_F_IS_CAT, NULL);
        if (rc) {
                CERROR("llog_init_handle failed %d\n", rc);
                GOTO(out, rc);
        }

        if (cb) {
                /* LLOG_PROC_BREAK is the expected early-stop, not an error */
                rc = llog_cat_process(llh, (llog_cb_t)cb, NULL);
                if (rc != LLOG_PROC_BREAK)
                        CERROR("llog_cat_process failed %d\n", rc);
        } else {
                CWARN("no callback function for recovery\n");
        }

        CDEBUG(D_HA, "send llcd %p:%p forcibly after recovery\n",
               ctxt->loc_llcd, ctxt);
        llog_sync(ctxt, NULL);
out:
        rc = llog_cat_put(llh);
        if (rc)
                CERROR("llog_cat_put failed %d\n", rc);

        RETURN(rc);
}
533
534 static int llog_recovery_generic(struct llog_ctxt *ctxt, void *handle,void *arg)
535 {
536         int rc;
537         ENTRY;
538
539         mutex_down(&llpa.llpa_sem);
540         llpa.llpa_ctxt = ctxt;
541         llpa.llpa_cb = handle;
542         llpa.llpa_arg = arg;
543
544         rc = cfs_kernel_thread(log_process_thread, &llpa, CLONE_VM | CLONE_FILES);
545         if (rc < 0)
546                 CERROR("error starting log_process_thread: %d\n", rc);
547         else {
548                 CDEBUG(D_HA, "log_process_thread: %d\n", rc);
549                 rc = 0;
550         }
551
552         RETURN(rc);
553 }
554
/* Called when a replicator (re)connects: flush any leftover llcd back
 * to the peer, record the new log generation, attach a fresh llcd to
 * the context, and kick off llog recovery processing in a separate
 * thread via llog_recovery_generic().  Returns 0 on success, -ENOMEM
 * if no llcd is available, or the recovery-start error. */
int llog_repl_connect(struct llog_ctxt *ctxt, int count,
                      struct llog_logid *logid, struct llog_gen *gen,
                      struct obd_uuid *uuid)
{
        struct llog_canceld_ctxt *llcd;
        int rc;
        ENTRY;

        /* send back llcd before recovery from llog */
        /* NOTE(review): loc_llcd is checked before loc_sem is taken --
         * presumably safe at connect time; confirm no concurrent cancel */
        if (ctxt->loc_llcd != NULL) {
                CWARN("llcd %p:%p not empty\n", ctxt->loc_llcd, ctxt);
                llog_sync(ctxt, NULL);
        }

        mutex_down(&ctxt->loc_sem);
        ctxt->loc_gen = *gen;
        llcd = llcd_grab();
        if (llcd == NULL) {
                CERROR("couldn't get an llcd\n");
                mutex_up(&ctxt->loc_sem);
                RETURN(-ENOMEM);
        }
        llcd->llcd_ctxt = ctxt;
        ctxt->loc_llcd = llcd;
        mutex_up(&ctxt->loc_sem);

        rc = llog_recovery_generic(ctxt, ctxt->llog_proc_cb, logid);
        if (rc != 0)
                CERROR("error recovery process: %d\n", rc);

        RETURN(rc);
}
EXPORT_SYMBOL(llog_repl_connect);
588
589 #else /* !__KERNEL__ */
590
/* Userspace (liblustre) build: replicator cancel is a no-op stub
 * kept so callers link against the same symbol. */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        return 0;
}
597 #endif