lustre/ptlrpc/recov_thread.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2003 Cluster File Systems, Inc.
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 *
 * OST<->MDS recovery logging thread.
 *
 * Invariants in implementation:
 * - we do not share logs among different OST<->MDS connections, so that
 *   if an OST or MDS fails it need only look at log(s) relevant to itself
 */

#define DEBUG_SUBSYSTEM S_LOG

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else
# include <libcfs/list.h>
# include <liblustre.h>
#endif

#include <libcfs/kp30.h>
#include <obd_class.h>
#include <lustre_commit_confd.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lnet/types.h>
#include <libcfs/list.h>
#include <lustre_log.h>
#include "ptlrpc_internal.h"

#ifdef __KERNEL__

static struct llog_commit_master lustre_lcm;
static struct llog_commit_master *lcm = &lustre_lcm;

/* Allocate new commit structs in case we do not have enough.
 * Make the llcd size small enough that it fits into a single page when we
 * are sending/receiving it. */
static int llcd_alloc(void)
{
        struct llog_canceld_ctxt *llcd;
        int llcd_size;

        /* payload of lustre_msg V2 is bigger */
        llcd_size = 4096 - lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, NULL);
        OBD_ALLOC(llcd,
                  llcd_size + offsetof(struct llog_canceld_ctxt, llcd_cookies));
        if (llcd == NULL)
                return -ENOMEM;

        llcd->llcd_size = llcd_size;
        llcd->llcd_lcm = lcm;

        spin_lock(&lcm->lcm_llcd_lock);
        list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
        atomic_inc(&lcm->lcm_llcd_numfree);
        spin_unlock(&lcm->lcm_llcd_lock);

        return 0;
}

/* Get a free cookie struct from the list */
struct llog_canceld_ctxt *llcd_grab(void)
{
        struct llog_canceld_ctxt *llcd;

repeat:
        spin_lock(&lcm->lcm_llcd_lock);
        if (list_empty(&lcm->lcm_llcd_free)) {
                spin_unlock(&lcm->lcm_llcd_lock);
                if (llcd_alloc() < 0) {
                        CERROR("unable to allocate log commit data!\n");
                        return NULL;
                }
                /* check new llcd wasn't grabbed while lock dropped, b=7407 */
                goto repeat;
        }

        llcd = list_entry(lcm->lcm_llcd_free.next, typeof(*llcd), llcd_list);
        list_del(&llcd->llcd_list);
        atomic_dec(&lcm->lcm_llcd_numfree);
        spin_unlock(&lcm->lcm_llcd_lock);

        llcd->llcd_cookiebytes = 0;

        return llcd;
}
EXPORT_SYMBOL(llcd_grab);

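/* Release an llcd: return it to the free list for reuse, or free it
 * outright if the free list already holds lcm_llcd_maxfree entries. */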
static void llcd_put(struct llog_canceld_ctxt *llcd)
{
        if (atomic_read(&lcm->lcm_llcd_numfree) >= lcm->lcm_llcd_maxfree) {
                int llcd_size = llcd->llcd_size +
                         offsetof(struct llog_canceld_ctxt, llcd_cookies);
                OBD_FREE(llcd, llcd_size);
        } else {
                spin_lock(&lcm->lcm_llcd_lock);
                list_add(&llcd->llcd_list, &lcm->lcm_llcd_free);
                atomic_inc(&lcm->lcm_llcd_numfree);
                spin_unlock(&lcm->lcm_llcd_lock);
        }
}

/* Send some cookies to the appropriate target */
void llcd_send(struct llog_canceld_ctxt *llcd)
{
        spin_lock(&llcd->llcd_lcm->lcm_llcd_lock);
        list_add_tail(&llcd->llcd_list, &llcd->llcd_lcm->lcm_llcd_pending);
        spin_unlock(&llcd->llcd_lcm->lcm_llcd_lock);

        cfs_waitq_signal_nr(&llcd->llcd_lcm->lcm_waitq, 1);
}
EXPORT_SYMBOL(llcd_send);

/* Deleted objects have a commit callback that cancels the MDS
 * log record for the deletion.  The commit callback calls this
 * function. */
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        struct llog_canceld_ctxt *llcd;
        int rc = 0;
        ENTRY;

        LASSERT(ctxt);

        mutex_down(&ctxt->loc_sem);
        if (ctxt->loc_imp == NULL) {
                CDEBUG(D_RPCTRACE, "no import for ctxt %p\n", ctxt);
                GOTO(out, rc = 0);
        }

        llcd = ctxt->loc_llcd;

        if (count > 0 && cookies != NULL) {
                if (llcd == NULL) {
                        llcd = llcd_grab();
                        if (llcd == NULL) {
                                CERROR("couldn't get an llcd - dropped "LPX64
                                       ":%x+%u\n",
                                       cookies->lgc_lgl.lgl_oid,
                                       cookies->lgc_lgl.lgl_ogen,
                                       cookies->lgc_index);
                                GOTO(out, rc = -ENOMEM);
                        }
                        llcd->llcd_ctxt = ctxt;
                        ctxt->loc_llcd = llcd;
                }

                memcpy((char *)llcd->llcd_cookies + llcd->llcd_cookiebytes,
                       cookies, sizeof(*cookies));
                llcd->llcd_cookiebytes += sizeof(*cookies);
        } else {
                if (llcd == NULL || !(flags & OBD_LLOG_FL_SENDNOW))
                        GOTO(out, rc);
        }

        if ((llcd->llcd_size - llcd->llcd_cookiebytes) < sizeof(*cookies) ||
            (flags & OBD_LLOG_FL_SENDNOW)) {
                CDEBUG(D_RPCTRACE, "send llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                ctxt->loc_llcd = NULL;
                llcd_send(llcd);
        }
out:
        mutex_up(&ctxt->loc_sem);
        return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);

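/* Flush pending cancel cookies for this context.  If the export being
 * torn down backs our reverse import there is nowhere to send them, so
 * drop any partially filled llcd and detach the import instead. */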
int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp)
{
        int rc = 0;
        ENTRY;

        if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                CDEBUG(D_RPCTRACE,"reverse import disconnect, put llcd %p:%p\n",
                       ctxt->loc_llcd, ctxt);
                mutex_down(&ctxt->loc_sem);
                if (ctxt->loc_llcd != NULL) {
                        llcd_put(ctxt->loc_llcd);
                        ctxt->loc_llcd = NULL;
                }
                ctxt->loc_imp = NULL;
                mutex_up(&ctxt->loc_sem);
        } else {
                rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
        }

        RETURN(rc);
}
EXPORT_SYMBOL(llog_obd_repl_sync);

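/* Main loop of a log commit daemon.  Wait for pending llcds, group them
 * by import so each thread works on a single target, send them as
 * OBD_LOG_CANCEL RPCs, and retry via the resend list when a request
 * cannot be allocated.  Spawns a replacement thread when it is the last
 * idle one, and drains all outstanding cookies on a forced exit. */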
static int log_commit_thread(void *arg)
{
        struct llog_commit_master *lcm = arg;
        struct llog_commit_daemon *lcd;
        struct llog_canceld_ctxt *llcd, *n;
        struct obd_import *import = NULL;
        ENTRY;

        OBD_ALLOC(lcd, sizeof(*lcd));
        if (lcd == NULL)
                RETURN(-ENOMEM);

        spin_lock(&lcm->lcm_thread_lock);
        THREAD_NAME(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX - 1,
                    "ll_log_comt_%02d", atomic_read(&lcm->lcm_thread_total));
        atomic_inc(&lcm->lcm_thread_total);
        spin_unlock(&lcm->lcm_thread_lock);

        ptlrpc_daemonize(cfs_curproc_comm()); /* thread never needs to do IO */

        CFS_INIT_LIST_HEAD(&lcd->lcd_lcm_list);
        CFS_INIT_LIST_HEAD(&lcd->lcd_llcd_list);
        lcd->lcd_lcm = lcm;

        CDEBUG(D_HA, "%s started\n", cfs_curproc_comm());
        do {
                struct ptlrpc_request *request;
                struct list_head *sending_list;
                int rc = 0;

                if (import)
                        class_import_put(import);
                import = NULL;

                /* If we do not have enough pages available, allocate some */
                while (atomic_read(&lcm->lcm_llcd_numfree) <
                       lcm->lcm_llcd_minfree) {
                        if (llcd_alloc() < 0)
                                break;
                }

                spin_lock(&lcm->lcm_thread_lock);
                atomic_inc(&lcm->lcm_thread_numidle);
                list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_idle);
                spin_unlock(&lcm->lcm_thread_lock);

                wait_event_interruptible(lcm->lcm_waitq,
                                         !list_empty(&lcm->lcm_llcd_pending) ||
                                         lcm->lcm_flags & LLOG_LCM_FL_EXIT);

                /* If we are the last available thread, start a new one in case
                 * we get blocked on an RPC (nobody else will start a new one)*/
                spin_lock(&lcm->lcm_thread_lock);
                atomic_dec(&lcm->lcm_thread_numidle);
                list_move(&lcd->lcd_lcm_list, &lcm->lcm_thread_busy);
                spin_unlock(&lcm->lcm_thread_lock);

                sending_list = &lcm->lcm_llcd_pending;
        resend:
                if (import)
                        class_import_put(import);
                import = NULL;
                if (lcm->lcm_flags & LLOG_LCM_FL_EXIT) {
                        lcm->lcm_llcd_maxfree = 0;
                        lcm->lcm_llcd_minfree = 0;
                        lcm->lcm_thread_max = 0;

                        if (list_empty(&lcm->lcm_llcd_pending) ||
                            lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE)
                                break;
                }

                if (atomic_read(&lcm->lcm_thread_numidle) <= 1 &&
                    atomic_read(&lcm->lcm_thread_total) < lcm->lcm_thread_max) {
                        rc = llog_start_commit_thread();
                        if (rc < 0)
                                CERROR("error starting thread: rc %d\n", rc);
                }

                /* Move all of the pending cancels from the same OST off of
                 * the list, so we don't get multiple threads blocked and/or
                 * doing upcalls on the same OST in case of failure. */
                spin_lock(&lcm->lcm_llcd_lock);
                if (!list_empty(sending_list)) {
                        list_move_tail(sending_list->next,
                                       &lcd->lcd_llcd_list);
                        llcd = list_entry(lcd->lcd_llcd_list.next,
                                          typeof(*llcd), llcd_list);
                        LASSERT(llcd->llcd_lcm == lcm);
                        import = llcd->llcd_ctxt->loc_imp;
                        if (import)
                                class_import_get(import);
                }
                list_for_each_entry_safe(llcd, n, sending_list, llcd_list) {
                        LASSERT(llcd->llcd_lcm == lcm);
                        if (import == llcd->llcd_ctxt->loc_imp)
                                list_move_tail(&llcd->llcd_list,
                                               &lcd->lcd_llcd_list);
                }
                if (sending_list != &lcm->lcm_llcd_resend) {
                        list_for_each_entry_safe(llcd, n, &lcm->lcm_llcd_resend,
                                                 llcd_list) {
                                LASSERT(llcd->llcd_lcm == lcm);
                                if (import == llcd->llcd_ctxt->loc_imp)
                                        list_move_tail(&llcd->llcd_list,
                                                       &lcd->lcd_llcd_list);
                        }
                }
                spin_unlock(&lcm->lcm_llcd_lock);

                /* We are the only one manipulating our local list - no lock */
                list_for_each_entry_safe(llcd,n, &lcd->lcd_llcd_list,llcd_list){
                        int size[2] = { sizeof(struct ptlrpc_body),
                                        llcd->llcd_cookiebytes };
                        char *bufs[2] = { NULL, (char *)llcd->llcd_cookies };

                        list_del(&llcd->llcd_list);
                        if (llcd->llcd_cookiebytes == 0) {
                                CDEBUG(D_RPCTRACE, "put empty llcd %p:%p\n",
                                       llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }

                        mutex_down(&llcd->llcd_ctxt->loc_sem);
                        if (llcd->llcd_ctxt->loc_imp == NULL) {
                                mutex_up(&llcd->llcd_ctxt->loc_sem);
                                CWARN("import will be destroyed, put "
                                      "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }
                        mutex_up(&llcd->llcd_ctxt->loc_sem);

                        if (!import || (import == LP_POISON) ||
                            (import->imp_client == LP_POISON)) {
                                CERROR("No import %p (llcd=%p, ctxt=%p)\n",
                                       import, llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                continue;
                        }

                        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_RECOV, 10);

                        request = ptlrpc_prep_req(import, LUSTRE_LOG_VERSION,
                                                  OBD_LOG_CANCEL, 2, size,bufs);
                        if (request == NULL) {
                                rc = -ENOMEM;
                                CERROR("error preparing commit: rc %d\n", rc);

                                spin_lock(&lcm->lcm_llcd_lock);
                                list_splice(&lcd->lcd_llcd_list,
                                            &lcm->lcm_llcd_resend);
                                CFS_INIT_LIST_HEAD(&lcd->lcd_llcd_list);
                                spin_unlock(&lcm->lcm_llcd_lock);
                                break;
                        }

                        /* XXX FIXME bug 249, 5515 */
                        request->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                        request->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                        ptlrpc_req_set_repsize(request, 1, NULL);
                        mutex_down(&llcd->llcd_ctxt->loc_sem);
                        if (llcd->llcd_ctxt->loc_imp == NULL) {
                                mutex_up(&llcd->llcd_ctxt->loc_sem);
                                CWARN("import will be destroyed, put "
                                      "llcd %p:%p\n", llcd, llcd->llcd_ctxt);
                                llcd_put(llcd);
                                ptlrpc_req_finished(request);
                                continue;
                        }
                        mutex_up(&llcd->llcd_ctxt->loc_sem);
                        rc = ptlrpc_queue_wait(request);
                        ptlrpc_req_finished(request);

                        /* If the RPC succeeded, free the llcd; on failure
                         * the cookies are dropped below rather than resent. */
                        if (rc == 0) {
                                llcd_put(llcd);
                                continue;
                        }

                        CERROR("commit %p:%p drop %d cookies: rc %d\n",
                               llcd, llcd->llcd_ctxt,
                               (int)(llcd->llcd_cookiebytes /
                                     sizeof(*llcd->llcd_cookies)), rc);
                        llcd_put(llcd);
                }

                if (rc == 0) {
                        sending_list = &lcm->lcm_llcd_resend;
                        if (!list_empty(sending_list))
                                goto resend;
                }
        } while (1);

        if (import)
                class_import_put(import);

        /* If we are force exiting, just drop all of the cookies. */
        if (lcm->lcm_flags & LLOG_LCM_FL_EXIT_FORCE) {
                spin_lock(&lcm->lcm_llcd_lock);
                list_splice(&lcm->lcm_llcd_pending, &lcd->lcd_llcd_list);
                list_splice(&lcm->lcm_llcd_resend, &lcd->lcd_llcd_list);
                list_splice(&lcm->lcm_llcd_free, &lcd->lcd_llcd_list);
                spin_unlock(&lcm->lcm_llcd_lock);

                list_for_each_entry_safe(llcd, n, &lcd->lcd_llcd_list,llcd_list)
                        llcd_put(llcd);
        }

        spin_lock(&lcm->lcm_thread_lock);
        list_del(&lcd->lcd_lcm_list);
        spin_unlock(&lcm->lcm_thread_lock);
        OBD_FREE(lcd, sizeof(*lcd));

        CDEBUG(D_HA, "%s exiting\n", cfs_curproc_comm());

        spin_lock(&lcm->lcm_thread_lock);
        atomic_dec(&lcm->lcm_thread_total);
        spin_unlock(&lcm->lcm_thread_lock);
        cfs_waitq_signal(&lcm->lcm_waitq);

        return 0;
}

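/* Start another commit thread, unless the configured maximum number of
 * threads is already running. */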
int llog_start_commit_thread(void)
{
        int rc;
        ENTRY;

        if (atomic_read(&lcm->lcm_thread_total) >= lcm->lcm_thread_max)
                RETURN(0);

        rc = cfs_kernel_thread(log_commit_thread, lcm, CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("error starting thread #%d: %d\n",
                       atomic_read(&lcm->lcm_thread_total), rc);
                RETURN(rc);
        }

        RETURN(0);
}
EXPORT_SYMBOL(llog_start_commit_thread);

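/* Arguments handed off to log_process_thread; llpa_sem serializes users
 * of this global until the new thread has copied the fields. */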
static struct llog_process_args {
        struct semaphore         llpa_sem;
        struct llog_ctxt        *llpa_ctxt;
        void                    *llpa_cb;
        void                    *llpa_arg;
} llpa;

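/* Initialize the global commit master: thread and llcd lists, locks,
 * the wait queue, and the llog_process_args semaphore. */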
int llog_init_commit_master(void)
{
        CFS_INIT_LIST_HEAD(&lcm->lcm_thread_busy);
        CFS_INIT_LIST_HEAD(&lcm->lcm_thread_idle);
        spin_lock_init(&lcm->lcm_thread_lock);
        atomic_set(&lcm->lcm_thread_numidle, 0);
        cfs_waitq_init(&lcm->lcm_waitq);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_pending);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_resend);
        CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_free);
        spin_lock_init(&lcm->lcm_llcd_lock);
        atomic_set(&lcm->lcm_llcd_numfree, 0);
        lcm->lcm_llcd_minfree = 0;
        lcm->lcm_thread_max = 5;
        /* initialize the semaphore protecting llog_process_args */
        sema_init(&llpa.llpa_sem, 1);
        return 0;
}

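/* Tell the commit threads to exit (dropping pending cookies when force
 * is set) and wait until the last one is gone. */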
int llog_cleanup_commit_master(int force)
{
        lcm->lcm_flags |= LLOG_LCM_FL_EXIT;
        if (force)
                lcm->lcm_flags |= LLOG_LCM_FL_EXIT_FORCE;
        cfs_waitq_signal(&lcm->lcm_waitq);

        wait_event_interruptible(lcm->lcm_waitq,
                                 atomic_read(&lcm->lcm_thread_total) == 0);
        return 0;
}

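/* Runs in its own kernel thread: open the catalog llog named by llpa_arg,
 * process it with the supplied callback, then force any accumulated
 * cancel cookies out via llog_sync(). */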
static int log_process_thread(void *args)
{
        struct llog_process_args *data = args;
        struct llog_ctxt *ctxt = data->llpa_ctxt;
        void   *cb = data->llpa_cb;
        struct llog_logid logid = *(struct llog_logid *)(data->llpa_arg);
        struct llog_handle *llh = NULL;
        int rc;
        ENTRY;

        mutex_up(&data->llpa_sem);
        ptlrpc_daemonize("llog_process");     /* thread does IO to log files */

        rc = llog_create(ctxt, &llh, &logid, NULL);
        if (rc) {
                CERROR("llog_create failed %d\n", rc);
                RETURN(rc);
        }
        rc = llog_init_handle(llh, LLOG_F_IS_CAT, NULL);
        if (rc) {
                CERROR("llog_init_handle failed %d\n", rc);
                GOTO(out, rc);
        }

        if (cb) {
                rc = llog_cat_process(llh, (llog_cb_t)cb, NULL);
                if (rc != LLOG_PROC_BREAK)
                        CERROR("llog_cat_process failed %d\n", rc);
        } else {
                CWARN("no callback function for recovery\n");
        }

        CDEBUG(D_HA, "send llcd %p:%p forcibly after recovery\n",
               ctxt->loc_llcd, ctxt);
        llog_sync(ctxt, NULL);
out:
        rc = llog_cat_put(llh);
        if (rc)
                CERROR("llog_cat_put failed %d\n", rc);

        RETURN(rc);
}

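/* Start a log_process_thread for the given context.  llpa is a single
 * global, so llpa_sem is taken here and released by the new thread once
 * it has copied the arguments. */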
static int llog_recovery_generic(struct llog_ctxt *ctxt, void *handle,void *arg)
{
        int rc;
        ENTRY;

        mutex_down(&llpa.llpa_sem);
        llpa.llpa_ctxt = ctxt;
        llpa.llpa_cb = handle;
        llpa.llpa_arg = arg;

        rc = cfs_kernel_thread(log_process_thread, &llpa, CLONE_VM | CLONE_FILES);
        if (rc < 0)
                CERROR("error starting log_process_thread: %d\n", rc);
        else {
                CDEBUG(D_HA, "log_process_thread: %d\n", rc);
                rc = 0;
        }

        RETURN(rc);
}

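/* Called when a replication connection is (re-)established: flush any
 * llcd left over from before the disconnect, record the new generation,
 * grab a fresh llcd and replay the catalog identified by logid through
 * llog_recovery_generic(). */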
int llog_repl_connect(struct llog_ctxt *ctxt, int count,
                      struct llog_logid *logid, struct llog_gen *gen,
                      struct obd_uuid *uuid)
{
        struct llog_canceld_ctxt *llcd;
        int rc;
        ENTRY;

        /* send back llcd before recovery from llog */
        if (ctxt->loc_llcd != NULL) {
                CWARN("llcd %p:%p not empty\n", ctxt->loc_llcd, ctxt);
                llog_sync(ctxt, NULL);
        }

        mutex_down(&ctxt->loc_sem);
        ctxt->loc_gen = *gen;
        llcd = llcd_grab();
        if (llcd == NULL) {
                CERROR("couldn't get an llcd\n");
                mutex_up(&ctxt->loc_sem);
                RETURN(-ENOMEM);
        }
        llcd->llcd_ctxt = ctxt;
        ctxt->loc_llcd = llcd;
        mutex_up(&ctxt->loc_sem);

        rc = llog_recovery_generic(ctxt, ctxt->llog_proc_cb, logid);
        if (rc != 0)
                CERROR("error recovery process: %d\n", rc);

        RETURN(rc);
}
EXPORT_SYMBOL(llog_repl_connect);

#else /* !__KERNEL__ */

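/* liblustre builds have no commit daemon; cancelling replicated log
 * records from userspace is a no-op. */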
int llog_obd_repl_cancel(struct llog_ctxt *ctxt,
                         struct lov_stripe_md *lsm, int count,
                         struct llog_cookie *cookies, int flags)
{
        return 0;
}
#endif