[fs/lustre-release.git] lustre/ldlm/ldlm_lockd.c
Fix timeouts when evicting a client with a single lock held (from 1.0.4).
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include "ldlm_internal.h"

extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

/* LDLM state */

static struct ldlm_state *ldlm;

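/* Round a jiffies-based expiry up to the next whole second, so that locks
 * timing out close together coalesce into a single timer firing. */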
inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}

#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t         elt_waitq;
        int                       elt_state;
        struct list_head          elt_expired_locks;
        spinlock_t                elt_lock;
} expired_lock_thread;
#endif

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};

#ifdef __KERNEL__

static inline int have_expired_locks(void)
{
        int need_to_run;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}

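/* Body of the dedicated "ldlm_elt" kernel thread: sleep until
 * waiting_locks_callback() hands over locks whose callback timers have
 * expired, then evict the exports holding them.  The eviction is done
 * here rather than in the timer handler, since timer callbacks run in
 * bottom-half context and must not block. */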
static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;

        ENTRY;
        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct ldlm_lock *lock = list_entry(expired->next,
                                                            struct ldlm_lock,
                                                            l_pending_chain);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(lock->l_export);

                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

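/* Timer handler: walk the FIFO waiting_locks_list, move every lock whose
 * timeout has passed onto the expired-lock thread's list and wake that
 * thread, then re-arm the timer for the next pending lock, if any. */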
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                /* use time_after() so jiffies wraparound is handled */
                if (time_after(lock->l_callback_timeout, jiffies))
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid "LPX64" (%s) ",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           lock->l_export->exp_connection->c_peer.peer_nid,
                           portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
                                           lock->l_export->exp_connection->c_peer.peer_nid,
                                           str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        /*
         * Make sure the timer will fire again if we have any locks
         * left.
         */
        if (!list_empty(&waiting_locks_list)) {
                unsigned long timeout_rounded;
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);
                timeout_rounded = round_timeout(lock->l_callback_timeout);
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
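/* Illustrative numbers only: with obd_timeout == 100s and HZ == 100, the
 * client is given obd_timeout/2 == 50s (5000 jiffies) to cancel, and
 * round_timeout() then rounds the expiry up to the next whole second
 * before the timer is (re)armed. */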
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                return 0;
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        /* time_before() handles jiffies wraparound */
        if (time_before(timeout_rounded, waiting_locks_timer.expires) ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        return 1;
}

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }

        spin_lock_bh(&expired_lock_thread.elt_lock);
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        return 1;
}

#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */

static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, char *ast_type)
{
        const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
               ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
               ast_type, rc,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               ldlm_lockname[lock->l_granted_mode],
               lock->l_export->exp_client_uuid.uuid,
               conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
               portals_nid2str(conn->c_peer.peer_ni->pni_number,
                               conn->c_peer.peer_nid, str));
        ptlrpc_fail_export(lock->l_export);
}

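/* Send a blocking AST RPC asking the client holding 'lock' to cancel it.
 * If the lock is granted, it is also added to the waiting-locks list, so
 * the client gets evicted if it fails to cancel within the timeout.
 * Returns -ERESTART when ldlm_reprocess_all must restart its pass. */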
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

#if 0
        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }
#endif

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_BL_CALLBACK, 1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
#ifdef __KERNEL__
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "blocking");
#else
                /* XXX
                 * Here we treat all clients as liblustre.  When a blocking
                 * AST times out we don't evict the client; we only cancel
                 * the lock.
                 * Restore the original implementation later!
                 * XXX
                 */
                CERROR("BLOCKING AST to client (nid "LPU64") timed out, "
                       "simply cancelling lock 0x%p\n",
                       req->rq_peer.peer_nid, lock);
                ldlm_lock_cancel(lock);
                rc = -ERESTART;
#endif
        } else if (rc) {
                if (rc == -EINVAL)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--normal race\n",
                               req->rq_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else if (rc == -ENOTCONN)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--this client was "
                               "probably rebooted while it held a lock, nothing"
                               " serious\n", req->rq_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else
                        CDEBUG(D_ERROR, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p\n",
                               req->rq_peer.peer_nid,
                               (req->rq_repmsg != NULL) ?
                               req->rq_repmsg->status : 0,
                               lock);
                LDLM_DEBUG(lock, "client sent rc %d rq_status %d from blocking "
                           "AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        ptlrpc_req_finished(req);

        RETURN(rc);
}

/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

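/* Tell the client that its enqueue has been granted.  If a blocking AST
 * was already folded into this lock (LDLM_FL_AST_SENT), forward that flag
 * in the RPC and start the lock-timeout clock now. */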
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (lock == NULL) {
                LBUG();
                RETURN(-EINVAL);
        }

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_CP_CALLBACK, 1, &size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "completion");
        } else if (rc) {
                LDLM_ERROR(lock, "client sent rc %d rq_status %d from "
                           "completion AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }
        ptlrpc_req_finished(req);

        RETURN(rc);
}

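/* Server-side handler for LDLM_ENQUEUE: unpack the request, create the
 * lock, attach it to the requesting export, enqueue it (possibly running
 * an intent policy), and pack the resulting handle and flags into the
 * reply. */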
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc, size = sizeof(*dlm_rep), cookielen = 0;
        __u32 flags;
        ldlm_error_t err;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                RETURN (-EFAULT);
        }

        flags = dlm_req->lock_flags;
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN &&
            (flags & LDLM_FL_HAS_INTENT)) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
                cookielen = sizeof(*req);
        } else {
                rc = lustre_pack_reply(req, 1, &size, NULL);
                if (rc) {
                        CERROR("out of memory\n");
                        RETURN(-ENOMEM);
                }
                if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                        cookie = &dlm_req->lock_desc.l_policy_data;
                        cookielen = sizeof(ldlm_policy_data_t);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace,
                                &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback, NULL);
        if (!lock)
                GOTO(out, err = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, cookielen,
                                &flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2handle(lock, &dlm_rep->lock_handle);
        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                memcpy(&dlm_rep->lock_policy_data, &lock->l_policy_data,
                       cookielen);
        }
        if (dlm_rep->lock_flags & LDLM_FL_LOCK_CHANGED) {
                memcpy(&dlm_rep->lock_resource_name, &lock->l_resource->lr_name,
                       sizeof(dlm_rep->lock_resource_name));
                dlm_rep->lock_mode = lock->l_req_mode;
        }

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d)", err);
                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }
        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p)", lock);

        return 0;
}

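/* Server-side handler for LDLM_CONVERT: look the lock up by handle,
 * convert its mode, and drop it from the waiting list if it was there. */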
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                RETURN (-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}

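/* Server-side handler for LDLM_CANCEL: cancel the lock named by the
 * client's handle, replying with ESTALE when the handle is unknown
 * (e.g. the lock was already cancelled). */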
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from nid "LPX64" (%s)\n", dlm_req->lock_handle1.cookie,
                       req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPU64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}

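/* Client-side handling of a blocking AST: mark the lock CBPENDING and,
 * if it is no longer referenced locally, run the registered blocking
 * callback immediately to cancel it. */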
static void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                                    struct ldlm_lock_desc *ld,
                                    struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be"
                           " cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

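/* Client-side handling of a completion AST: fix up the mode, policy data
 * and resource in case they changed during enqueue, then grant the lock
 * and run any AST work that the grant queued up. */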
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }
        if (lock->l_resource->lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                         dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}

static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc)
                return rc;
        return ptlrpc_reply(req);
}

#ifdef __KERNEL__
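/* Hand a blocking AST over to the ldlm_bl thread pool instead of
 * handling it in the ptlrpc service thread.  A work item with
 * blwi_ns == NULL is reserved as the thread-shutdown signal (see
 * ldlm_cleanup). */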
static int ldlm_bl_to_thread(struct ldlm_state *ldlm, struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
}
#endif

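/* Request handler for the LDLM callback service: blocking and completion
 * ASTs arriving from lock servers, plus the llog origin requests that
 * share this service. */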
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc, req->rq_peer.peer_nid,
                       portals_nid2str(req->rq_peer.peer_ni->pni_number,
                                       req->rq_peer.peer_nid, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        if (req->rq_reqmsg->opc == LDLM_BL_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == LDLM_CP_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else {
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        /* FIXME - how to send reply */
        if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                int rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                int rc = llog_origin_handle_create(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                int rc = llog_origin_handle_next_block(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                int rc = llog_origin_handle_read_header(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                int rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("can't unpack dlm_req\n");
                ldlm_callback_reply (req, -EPROTO);
                RETURN (0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
                       dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */
        if (req->rq_reqmsg->opc != LDLM_BL_CALLBACK)
                ldlm_callback_reply(req, 0);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
#ifdef __KERNEL__
                rc = ldlm_bl_to_thread(ldlm, ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
#else
                rc = 0;
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
#endif
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG();                         /* checked above */
        }

        RETURN(0);
}

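/* Request handler for the LDLM cancel service; only LDLM_CANCEL is
 * accepted here (enqueue and convert are dispatched from the targets'
 * own request handlers via the exported ldlm_handle_* functions). */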
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                RETURN(-ENOTCONN);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);

        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                RETURN(-EINVAL);
        }

        RETURN(0);
}

#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}

struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

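/* Main loop of an "ldlm_bl_%02d" worker: pull work items off the pool's
 * list and run the blocking-AST handler for each, exiting when the
 * blwi_ns == NULL shutdown item is received. */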
static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}

#endif

static int ldlm_setup(void);
static int ldlm_cleanup(int force);

int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}

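/* One-time LDLM start-up, serialized by ldlm_ref_sem in ldlm_get_ref():
 * register the callback and cancel ptlrpc services, start the blocking-
 * AST thread pool and the expired-lock thread, and initialize the
 * (initially idle) waiting-locks timer. */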
static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm, sizeof(*ldlm));
        if (ldlm == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

        ldlm->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CB_REQUEST_PORTAL,
                                LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);

        if (!ldlm->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);

        if (!ldlm->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        LBUG();
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        /* initialize the timer before filling in its callback */
        init_timer(&waiting_locks_timer);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
#endif

 out_proc:
#ifdef __KERNEL__
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;
        return rc;
}

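/* Tear down everything ldlm_setup() created.  Each ldlm_bl thread is
 * stopped by feeding it one stack-allocated work item with a NULL
 * blwi_ns and waiting on blp_comp for the thread to exit. */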
static int ldlm_cleanup(int force)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
#endif
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);
#endif

        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;

        RETURN(0);
}

int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
}

/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_unregister_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

#if 0
/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);
#endif

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_abort_recovery);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);
EXPORT_SYMBOL(ldlm_put_lock_into_req);