/* lustre/ldlm/ldlm_lockd.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
# include <linux/module.h>
# include <linux/slab.h>
# include <linux/init.h>
# include <linux/wait.h>
#else
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include "ldlm_internal.h"

extern kmem_cache_t *ldlm_resource_slab;
extern kmem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
extern struct list_head ldlm_namespace_list;
extern int (*mds_reint_p)(int offset, struct ptlrpc_request *req);
extern int (*mds_getattr_name_p)(int offset, struct ptlrpc_request *req);

static DECLARE_MUTEX(ldlm_ref_sem);
static int ldlm_refcount = 0;

/* LDLM state */

static struct ldlm_state *ldlm;

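/* Round a jiffies timeout up to the next whole second.  Grouping deadlines
 * on second boundaries lets nearby waiting locks share one firing of
 * waiting_locks_timer instead of each arming its own (see the comment
 * above ldlm_add_waiting_lock).  E.g. with HZ=100, 250 jiffies rounds up
 * to 300. */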
inline unsigned long round_timeout(unsigned long timeout)
{
        return ((timeout / HZ) + 1) * HZ;
}

#ifdef __KERNEL__
/* XXX should this be per-ldlm? */
static struct list_head waiting_locks_list;
static spinlock_t waiting_locks_spinlock;
static struct timer_list waiting_locks_timer;

static struct expired_lock_thread {
        wait_queue_head_t         elt_waitq;
        int                       elt_state;
        struct list_head          elt_expired_locks;
        spinlock_t                elt_lock;
} expired_lock_thread;
#endif

#define ELT_STOPPED   0
#define ELT_READY     1
#define ELT_TERMINATE 2

struct ldlm_bl_pool {
        spinlock_t              blp_lock;
        struct list_head        blp_list;
        wait_queue_head_t       blp_waitq;
        atomic_t                blp_num_threads;
        struct completion       blp_comp;
};

struct ldlm_bl_work_item {
        struct list_head        blwi_entry;
        struct ldlm_namespace   *blwi_ns;
        struct ldlm_lock_desc   blwi_ld;
        struct ldlm_lock        *blwi_lock;
};

#ifdef __KERNEL__

static inline int have_expired_locks(void)
{
        int need_to_run;

        spin_lock_bh(&expired_lock_thread.elt_lock);
        need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
        spin_unlock_bh(&expired_lock_thread.elt_lock);

        RETURN(need_to_run);
}

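/* Main loop of the expired-lock thread.  waiting_locks_callback() moves
 * locks whose cancellation deadline has passed onto elt_expired_locks and
 * wakes us; we then evict the offending client by failing its export.
 * The eviction happens here, in process context, rather than in the timer
 * handler itself. */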
static int expired_lock_main(void *arg)
{
        struct list_head *expired = &expired_lock_thread.elt_expired_locks;
        struct l_wait_info lwi = { 0 };
        unsigned long flags;

        ENTRY;
        lock_kernel();
        kportal_daemonize("ldlm_elt");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        unlock_kernel();

        expired_lock_thread.elt_state = ELT_READY;
        wake_up(&expired_lock_thread.elt_waitq);

        while (1) {
                l_wait_event(expired_lock_thread.elt_waitq,
                             have_expired_locks() ||
                             expired_lock_thread.elt_state == ELT_TERMINATE,
                             &lwi);

                spin_lock_bh(&expired_lock_thread.elt_lock);
                while (!list_empty(expired)) {
                        struct ldlm_lock *lock = list_entry(expired->next,
                                                            struct ldlm_lock,
                                                            l_pending_chain);
                        spin_unlock_bh(&expired_lock_thread.elt_lock);

                        ptlrpc_fail_export(lock->l_export);

                        spin_lock_bh(&expired_lock_thread.elt_lock);
                }
                spin_unlock_bh(&expired_lock_thread.elt_lock);

                if (expired_lock_thread.elt_state == ELT_TERMINATE)
                        break;
        }

        expired_lock_thread.elt_state = ELT_STOPPED;
        wake_up(&expired_lock_thread.elt_waitq);
        RETURN(0);
}

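/* Timer handler for waiting_locks_timer.  Runs in softirq context (hence
 * the _bh spinlocks): walks the time-ordered waiting_locks_list from the
 * head, and for every lock whose callback deadline has passed, logs the
 * eviction and hands the lock to the expired-lock thread above. */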
static void waiting_locks_callback(unsigned long unused)
{
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];

        spin_lock_bh(&waiting_locks_spinlock);
        while (!list_empty(&waiting_locks_list)) {
                lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
                                  l_pending_chain);

                if (lock->l_callback_timeout > jiffies)
                        break;

                LDLM_ERROR(lock, "lock callback timer expired: evicting client "
                           "%s@%s nid "LPX64" (%s) ",
                           lock->l_export->exp_client_uuid.uuid,
                           lock->l_export->exp_connection->c_remote_uuid.uuid,
                           lock->l_export->exp_connection->c_peer.peer_nid,
                           portals_nid2str(lock->l_export->exp_connection->c_peer.peer_ni->pni_number,
                                           lock->l_export->exp_connection->c_peer.peer_nid,
                                           str));

                spin_lock_bh(&expired_lock_thread.elt_lock);
                list_del(&lock->l_pending_chain);
                list_add(&lock->l_pending_chain,
                         &expired_lock_thread.elt_expired_locks);
                spin_unlock_bh(&expired_lock_thread.elt_lock);
                wake_up(&expired_lock_thread.elt_waitq);
        }

        spin_unlock_bh(&waiting_locks_spinlock);
}

/*
 * Indicate that we're waiting for a client to call us back cancelling a given
 * lock.  We add it to the pending-callback chain, and schedule the lock-timeout
 * timer to fire appropriately.  (We round up to the next second, to avoid
 * floods of timer firings during periods of high lock contention and traffic).
 */
static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        unsigned long timeout_rounded;

        spin_lock_bh(&waiting_locks_spinlock);
        if (!list_empty(&lock->l_pending_chain)) {
                LDLM_DEBUG(lock, "not re-adding to wait list");
                spin_unlock_bh(&waiting_locks_spinlock);
                return 0;
        }
        LDLM_DEBUG(lock, "adding to wait list");

        lock->l_callback_timeout = jiffies + (obd_timeout * HZ / 2);

        timeout_rounded = round_timeout(lock->l_callback_timeout);

        if (timeout_rounded < waiting_locks_timer.expires ||
            !timer_pending(&waiting_locks_timer)) {
                mod_timer(&waiting_locks_timer, timeout_rounded);
        }
        list_add_tail(&lock->l_pending_chain, &waiting_locks_list); /* FIFO */
        spin_unlock_bh(&waiting_locks_spinlock);
        return 1;
}

/*
 * Remove a lock from the pending list, likely because it had its cancellation
 * callback arrive without incident.  This adjusts the lock-timeout timer if
 * needed.  Returns 0 if the lock wasn't pending after all, 1 if it was.
 */
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        struct list_head *list_next;

        if (lock->l_export == NULL) {
                /* We don't have a "waiting locks list" on clients. */
                LDLM_DEBUG(lock, "client lock: no-op");
                return 0;
        }

        spin_lock_bh(&waiting_locks_spinlock);

        if (list_empty(&lock->l_pending_chain)) {
                spin_unlock_bh(&waiting_locks_spinlock);
                LDLM_DEBUG(lock, "wasn't waiting");
                return 0;
        }

        list_next = lock->l_pending_chain.next;
        if (lock->l_pending_chain.prev == &waiting_locks_list) {
                /* Removing the head of the list, adjust timer. */
                if (list_next == &waiting_locks_list) {
                        /* No more, just cancel. */
                        del_timer(&waiting_locks_timer);
                } else {
                        struct ldlm_lock *next;
                        next = list_entry(list_next, struct ldlm_lock,
                                          l_pending_chain);
                        mod_timer(&waiting_locks_timer,
                                  round_timeout(next->l_callback_timeout));
                }
        }
        list_del_init(&lock->l_pending_chain);
        spin_unlock_bh(&waiting_locks_spinlock);
        LDLM_DEBUG(lock, "removed");
        return 1;
}

#else /* !__KERNEL__ */

static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(1);
}

int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
        RETURN(0);
}

#endif /* __KERNEL__ */

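/* A client failed to respond to an AST: log the failure with enough detail
 * to identify the peer, then evict it by failing its export. */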
static void ldlm_failed_ast(struct ldlm_lock *lock, int rc, char *ast_type)
{
        const struct ptlrpc_connection *conn = lock->l_export->exp_connection;
        char str[PTL_NALFMT_SIZE];

        CERROR("%s AST failed (%d) for res "LPU64"/"LPU64
               ", mode %s: evicting client %s@%s NID "LPX64" (%s)\n",
               ast_type, rc,
               lock->l_resource->lr_name.name[0],
               lock->l_resource->lr_name.name[1],
               ldlm_lockname[lock->l_granted_mode],
               lock->l_export->exp_client_uuid.uuid,
               conn->c_remote_uuid.uuid, conn->c_peer.peer_nid,
               portals_nid2str(conn->c_peer.peer_ni->pni_number,
                               conn->c_peer.peer_nid, str));
        ptlrpc_fail_export(lock->l_export);
}

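/* Send a blocking AST (LDLM_BL_CALLBACK) to the client holding this lock,
 * over the reverse import of its export.  If the lock isn't granted yet,
 * the blocking flag travels with the completion AST instead and nothing is
 * sent here.  A timed-out or interrupted reply evicts the client (in the
 * kernel case); other errors cancel the lock and return -ERESTART so that
 * ldlm_reprocess_all() restarts its reprocessing. */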
int ldlm_server_blocking_ast(struct ldlm_lock *lock,
                             struct ldlm_lock_desc *desc,
                             void *data, int flag)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (flag == LDLM_CB_CANCELING) {
                /* Don't need to do anything here. */
                RETURN(0);
        }

        LASSERT(lock);

        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_granted_mode != lock->l_req_mode) {
                /* this blocking AST will be communicated as part of the
                 * completion AST instead */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                LDLM_DEBUG(lock, "lock not granted, not sending blocking AST");
                RETURN(0);
        }

        if (lock->l_destroyed) {
                /* What's the point? */
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(0);
        }

#if 0
        if (LTIME_S(CURRENT_TIME) - lock->l_export->exp_last_request_time > 30){
                ldlm_failed_ast(lock, -ETIMEDOUT, "Not-attempted blocking");
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ETIMEDOUT);
        }
#endif

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_BL_CALLBACK, 1, &size, NULL);
        if (req == NULL) {
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);
                RETURN(-ENOMEM);
        }

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        memcpy(&body->lock_desc, desc, sizeof(*desc));
        body->lock_flags |= (lock->l_flags & LDLM_AST_FLAGS);

        LDLM_DEBUG(lock, "server preparing blocking AST");
        req->rq_replen = lustre_msg_size(0, NULL);

        if (lock->l_granted_mode == lock->l_req_mode)
                ldlm_add_waiting_lock(lock);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */
        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
#ifdef __KERNEL__
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "blocking");
#else
                /* XXX
                 * Here we treat all clients as liblustre.  When a blocking
                 * AST times out we don't evict the client, we only cancel
                 * the lock.
                 * Restore the original implementation later!
                 * XXX
                 */
                CERROR("BLOCKING AST to client (nid "LPU64") timeout, "
                       "simply cancel lock 0x%p\n",
                       req->rq_connection->c_peer.peer_nid, lock);
                ldlm_lock_cancel(lock);
                rc = -ERESTART;
#endif
        } else if (rc) {
                if (rc == -EINVAL)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--normal race\n",
                               req->rq_connection->c_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else if (rc == -ENOTCONN)
                        CDEBUG(D_DLMTRACE, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p--this client was "
                               "probably rebooted while it held a lock, nothing"
                               " serious\n",req->rq_connection->c_peer.peer_nid,
                               req->rq_repmsg->status, lock);
                else
                        CDEBUG(D_ERROR, "client (nid "LPU64") returned %d "
                               "from blocking AST for lock %p\n",
                               req->rq_connection->c_peer.peer_nid,
                               (req->rq_repmsg != NULL)?
                               req->rq_repmsg->status : 0,
                               lock);
                LDLM_DEBUG(lock, "client sent rc %d rq_status %d from blocking "
                           "AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }

        ptlrpc_req_finished(req);

        RETURN(rc);
}

/* XXX copied from ptlrpc/service.c */
static long timeval_sub(struct timeval *large, struct timeval *small)
{
        return (large->tv_sec - small->tv_sec) * 1000000 +
                (large->tv_usec - small->tv_usec);
}

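/* Send a completion AST (LDLM_CP_CALLBACK) telling the client its lock has
 * been granted.  If a blocking AST was already generated for this lock
 * (LDLM_FL_AST_SENT), piggyback that flag on the completion and start the
 * lock-timeout clock now, since the client owes us a cancel. */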
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
{
        struct ldlm_request *body;
        struct ptlrpc_request *req;
        struct timeval granted_time;
        long total_enqueue_wait;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        if (lock == NULL) {
                LBUG();
                RETURN(-EINVAL);
        }

        do_gettimeofday(&granted_time);
        total_enqueue_wait = timeval_sub(&granted_time, &lock->l_enqueued_time);

        if (total_enqueue_wait / 1000000 > obd_timeout)
                LDLM_ERROR(lock, "enqueue wait took %ldus", total_enqueue_wait);

        req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
                              LDLM_CP_CALLBACK, 1, &size, NULL);
        if (!req)
                RETURN(-ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*body));
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));
        body->lock_flags = flags;
        ldlm_lock2desc(lock, &body->lock_desc);

        LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
                   total_enqueue_wait);
        req->rq_replen = lustre_msg_size(0, NULL);

        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_timeout = 2; /* 2 second timeout for initial AST reply */

        /* We only send real blocking ASTs after the lock is granted */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                body->lock_flags |= LDLM_FL_AST_SENT;
                ldlm_add_waiting_lock(lock); /* start the lock-timeout clock */
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        rc = ptlrpc_queue_wait(req);
        if (rc == -ETIMEDOUT || rc == -EINTR) {
                ldlm_del_waiting_lock(lock);
                ldlm_failed_ast(lock, rc, "completion");
        } else if (rc) {
                LDLM_ERROR(lock, "client sent rc %d rq_status %d from "
                           "completion AST", rc, req->rq_status);
                ldlm_lock_cancel(lock);
                /* Server-side AST functions are called from ldlm_reprocess_all,
                 * which needs to be told to please restart its reprocessing. */
                rc = -ERESTART;
        }
        ptlrpc_req_finished(req);

        RETURN(rc);
}

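/* Server-side handler for LDLM_ENQUEUE: unpack the request, create the
 * lock, link it onto the export's list of held locks, and run the actual
 * enqueue (which may grant, block, or invoke an intent policy).  The reply
 * carries the (possibly changed) resource, mode, and flags back to the
 * client. */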
int ldlm_handle_enqueue(struct ptlrpc_request *req,
                        ldlm_completion_callback completion_callback,
                        ldlm_blocking_callback blocking_callback)
{
        struct obd_device *obddev = req->rq_export->exp_obd;
        struct ldlm_reply *dlm_rep;
        struct ldlm_request *dlm_req;
        int rc, size = sizeof(*dlm_rep), cookielen = 0;
        __u32 flags;
        ldlm_error_t err;
        struct ldlm_lock *lock = NULL;
        void *cookie = NULL;
        ENTRY;

        LDLM_DEBUG_NOLOCK("server-side enqueue handler START");

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                RETURN (-EFAULT);
        }

        flags = dlm_req->lock_flags;
        if (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN &&
            (flags & LDLM_FL_HAS_INTENT)) {
                /* In this case, the reply buffer is allocated deep in
                 * local_lock_enqueue by the policy function. */
                cookie = req;
                cookielen = sizeof(*req);
        } else {
                rc = lustre_pack_reply(req, 1, &size, NULL);
                if (rc) {
                        CERROR("out of memory\n");
                        RETURN(-ENOMEM);
                }
                if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                        cookie = &dlm_req->lock_desc.l_policy_data;
                        cookielen = sizeof(ldlm_policy_data_t);
                }
        }

        /* The lock's callback data might be set in the policy function */
        lock = ldlm_lock_create(obddev->obd_namespace,
                                &dlm_req->lock_handle2,
                                dlm_req->lock_desc.l_resource.lr_name,
                                dlm_req->lock_desc.l_resource.lr_type,
                                dlm_req->lock_desc.l_req_mode,
                                blocking_callback, completion_callback, NULL);
        if (!lock)
                GOTO(out, err = -ENOMEM);

        do_gettimeofday(&lock->l_enqueued_time);
        memcpy(&lock->l_remote_handle, &dlm_req->lock_handle1,
               sizeof(lock->l_remote_handle));
        LDLM_DEBUG(lock, "server-side enqueue handler, new lock created");

        LASSERT(req->rq_export);
        lock->l_export = class_export_get(req->rq_export);
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        list_add(&lock->l_export_chain,
                 &lock->l_export->exp_ldlm_data.led_held_locks);
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        err = ldlm_lock_enqueue(obddev->obd_namespace, &lock, cookie, cookielen,
                                &flags);
        if (err)
                GOTO(out, err);

        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = flags;

        ldlm_lock2handle(lock, &dlm_rep->lock_handle);
        if (dlm_req->lock_desc.l_resource.lr_type != LDLM_PLAIN) {
                memcpy(&dlm_rep->lock_policy_data, &lock->l_policy_data,
                       cookielen);
        }
        if (dlm_rep->lock_flags & LDLM_FL_LOCK_CHANGED) {
                memcpy(&dlm_rep->lock_resource_name, &lock->l_resource->lr_name,
                       sizeof(dlm_rep->lock_resource_name));
                dlm_rep->lock_mode = lock->l_req_mode;
        }

        /* We never send a blocking AST until the lock is granted, but
         * we can tell it right now */
        l_lock(&lock->l_resource->lr_namespace->ns_lock);
        if (lock->l_flags & LDLM_FL_AST_SENT) {
                dlm_rep->lock_flags |= LDLM_FL_AST_SENT;
                if (lock->l_granted_mode == lock->l_req_mode)
                        ldlm_add_waiting_lock(lock);
        }
        l_unlock(&lock->l_resource->lr_namespace->ns_lock);

        EXIT;
 out:
        req->rq_status = err;

        /* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
         * ldlm_reprocess_all.  If this moves, revisit that code. -phil */
        if (lock) {
                LDLM_DEBUG(lock, "server-side enqueue handler, sending reply "
                           "(err=%d)", err);
                if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                        ldlm_reprocess_all(lock->l_resource);
                LDLM_LOCK_PUT(lock);
        }
        LDLM_DEBUG_NOLOCK("server-side enqueue handler END (lock %p)", lock);

        return 0;
}

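/* Server-side handler for LDLM_CONVERT: look up the lock by handle and
 * convert it to the requested mode.  A converted lock no longer needs a
 * cancel from the client, so drop it from the waiting list, then reprocess
 * the resource in case other locks can now be granted. */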
int ldlm_handle_convert(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_reply *dlm_rep;
        struct ldlm_lock *lock;
        int rc, size = sizeof(*dlm_rep);
        ENTRY;

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("Can't unpack dlm_req\n");
                RETURN (-EFAULT);
        }

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }
        dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
        dlm_rep->lock_flags = dlm_req->lock_flags;

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                req->rq_status = EINVAL;
        } else {
                LDLM_DEBUG(lock, "server-side convert handler START");
                ldlm_lock_convert(lock, dlm_req->lock_desc.l_req_mode,
                                  &dlm_rep->lock_flags);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "converted waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side convert handler END");
                LDLM_LOCK_PUT(lock);
        } else
                LDLM_DEBUG_NOLOCK("server-side convert handler END");

        RETURN(0);
}

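/* Server-side handler for LDLM_CANCEL: the client is giving a lock back
 * (usually in answer to a blocking AST).  Cancel it, stop its eviction
 * timer, reply, and reprocess the resource so waiters can be granted.  An
 * unknown handle gets ESTALE; the client may legitimately race a cancel
 * against server-side cleanup of the same lock. */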
int ldlm_handle_cancel(struct ptlrpc_request *req)
{
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR("bad request buffer for cancel\n");
                RETURN(-EFAULT);
        }

        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc) {
                CERROR("out of memory\n");
                RETURN(-ENOMEM);
        }

        lock = ldlm_handle2lock(&dlm_req->lock_handle1);
        if (!lock) {
                CERROR("received cancel for unknown lock cookie "LPX64
                       " from nid "LPX64" (%s)\n", dlm_req->lock_handle1.cookie,
                       req->rq_connection->c_peer.peer_nid,
                       portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
                                       req->rq_connection->c_peer.peer_nid, str));
                LDLM_DEBUG_NOLOCK("server-side cancel handler stale lock "
                                  "(cookie "LPU64")",
                                  dlm_req->lock_handle1.cookie);
                req->rq_status = ESTALE;
        } else {
                LDLM_DEBUG(lock, "server-side cancel handler START");
                ldlm_lock_cancel(lock);
                if (ldlm_del_waiting_lock(lock))
                        CDEBUG(D_DLMTRACE, "cancelled waiting lock %p\n", lock);
                req->rq_status = 0;
        }

        if (ptlrpc_reply(req) != 0)
                LBUG();

        if (lock) {
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "server-side cancel handler END");
                LDLM_LOCK_PUT(lock);
        }

        RETURN(0);
}

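/* Client-side handler for a blocking AST from the server.  Mark the lock
 * CBPENDING; if it has no readers or writers it is unused and we run the
 * blocking callback (which will cancel it) right away, otherwise the
 * cancel happens when the last reference is dropped. */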
static void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                                    struct ldlm_lock_desc *ld,
                                    struct ldlm_lock *lock)
{
        int do_ast;
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client blocking AST callback handler START");

        lock->l_flags |= LDLM_FL_CBPENDING;
        do_ast = (!lock->l_readers && !lock->l_writers);

        if (do_ast) {
                LDLM_DEBUG(lock, "already unused, calling "
                           "callback (%p)", lock->l_blocking_ast);
                if (lock->l_blocking_ast != NULL) {
                        l_unlock(&ns->ns_lock);
                        l_check_no_ns_lock(ns);
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
                        l_lock(&ns->ns_lock);
                }
        } else {
                LDLM_DEBUG(lock, "Lock still has references, will be"
                           " cancelled later");
        }

        LDLM_DEBUG(lock, "client blocking callback handler END");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);
        EXIT;
}

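/* Client-side handler for a completion AST: the server has granted our
 * lock, possibly with a changed mode, resource name, or policy data if the
 * AST arrived before the enqueue reply.  Update the lock to match, grant
 * it locally, and run the deferred AST work outside the namespace lock. */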
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        LIST_HEAD(ast_list);
        ENTRY;

        l_lock(&ns->ns_lock);
        LDLM_DEBUG(lock, "client completion callback handler START");

        /* If we receive the completion AST before the actual enqueue returned,
         * then we might need to switch lock modes, resources, or extents. */
        if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
                lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
                LDLM_DEBUG(lock, "completion AST, new lock mode");
        }
        if (lock->l_resource->lr_type != LDLM_PLAIN)
                memcpy(&lock->l_policy_data, &dlm_req->lock_desc.l_policy_data,
                       sizeof(lock->l_policy_data));

        ldlm_resource_unlink_lock(lock);
        if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
                   &lock->l_resource->lr_name,
                   sizeof(lock->l_resource->lr_name)) != 0) {
                ldlm_lock_change_resource(ns, lock,
                                         dlm_req->lock_desc.l_resource.lr_name);
                LDLM_DEBUG(lock, "completion AST, new resource");
        }

        if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
                lock->l_flags |= LDLM_FL_CBPENDING;
                LDLM_DEBUG(lock, "completion AST includes blocking AST");
        }

        lock->l_resource->lr_tmp = &ast_list;
        ldlm_grant_lock(lock, req, sizeof(*req), 1);
        lock->l_resource->lr_tmp = NULL;
        LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
        l_unlock(&ns->ns_lock);
        LDLM_LOCK_PUT(lock);

        ldlm_run_ast_work(ns, &ast_list);

        LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
                          lock);
        EXIT;
}

static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
        req->rq_status = rc;
        rc = lustre_pack_reply(req, 0, NULL, NULL);
        if (rc)
                return rc;
        return ptlrpc_reply(req);
}

#ifdef __KERNEL__
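/* Hand a blocking AST off to a thread from the ldlm_bl pool.  The callback
 * service thread just queues a work item and wakes a pool thread, leaving
 * it free to keep servicing callbacks while the (possibly slow) blocking
 * callback runs. */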
static int ldlm_bl_to_thread(struct ldlm_state *ldlm, struct ldlm_namespace *ns,
                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
        struct ldlm_bl_work_item *blwi;
        ENTRY;

        OBD_ALLOC(blwi, sizeof(*blwi));
        if (blwi == NULL)
                RETURN(-ENOMEM);

        blwi->blwi_ns = ns;
        blwi->blwi_ld = *ld;
        blwi->blwi_lock = lock;

        spin_lock(&blp->blp_lock);
        list_add_tail(&blwi->blwi_entry, &blp->blp_list);
        wake_up(&blp->blp_waitq);
        spin_unlock(&blp->blp_lock);

        RETURN(0);
}
#endif

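/* Request handler for the ldlm_cbd service.  Dispatches blocking and
 * completion ASTs arriving from servers, plus the llog origin-handle
 * operations that share this portal.  Note the reply ordering: for
 * everything except blocking ASTs we reply before running the handler, so
 * this thread is available for requests the callback may trigger. */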
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
        struct ldlm_namespace *ns;
        struct ldlm_request *dlm_req;
        struct ldlm_lock *lock;
        char str[PTL_NALFMT_SIZE];
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;

                CDEBUG(D_RPCTRACE, "operation %d from nid "LPX64" (%s) with bad "
                       "export cookie "LPX64" (ptl req %d/rep %d); this is "
                       "normal if this node rebooted with a lock held\n",
                       req->rq_reqmsg->opc, req->rq_connection->c_peer.peer_nid,
                       portals_nid2str(req->rq_connection->c_peer.peer_ni->pni_number,
                                       req->rq_connection->c_peer.peer_nid, str),
                       req->rq_reqmsg->handle.cookie,
                       req->rq_request_portal, req->rq_reply_portal);

                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        CDEBUG(D_RPCTRACE, "--> lock cookie: "LPX64"\n",
                               dlm_req->lock_handle1.cookie);

                ldlm_callback_reply(req, -ENOTCONN);
                RETURN(0);
        }

        if (req->rq_reqmsg->opc == LDLM_BL_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == LDLM_CP_CALLBACK) {
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CP_CALLBACK, 0);
        } else if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
        } else {
                ldlm_callback_reply(req, -EPROTO);
                RETURN(0);
        }

        LASSERT(req->rq_export != NULL);
        LASSERT(req->rq_export->exp_obd != NULL);

        /* FIXME - how to send reply */
        if (req->rq_reqmsg->opc == OBD_LOG_CANCEL) {
                int rc = llog_origin_handle_cancel(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CREATE) {
                int rc = llog_origin_handle_create(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_NEXT_BLOCK) {
                int rc = llog_origin_handle_next_block(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_READ_HEADER) {
                int rc = llog_origin_handle_read_header(req);
                req->rq_status = rc;
                ptlrpc_reply(req);
                RETURN(0);
        }
        if (req->rq_reqmsg->opc == LLOG_ORIGIN_HANDLE_CLOSE) {
                int rc = llog_origin_handle_close(req);
                ldlm_callback_reply(req, rc);
                RETURN(0);
        }

        ns = req->rq_export->exp_obd->obd_namespace;
        LASSERT(ns != NULL);

        dlm_req = lustre_swab_reqbuf (req, 0, sizeof (*dlm_req),
                                      lustre_swab_ldlm_request);
        if (dlm_req == NULL) {
                CERROR ("can't unpack dlm_req\n");
                ldlm_callback_reply (req, -EPROTO);
                RETURN (0);
        }

        lock = ldlm_handle2lock_ns(ns, &dlm_req->lock_handle1);
        if (!lock) {
                CDEBUG(D_INODE, "callback on lock "LPX64" - lock disappeared\n",
                       dlm_req->lock_handle1.cookie);
                ldlm_callback_reply(req, -EINVAL);
                RETURN(0);
        }

        /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
        lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);

        /* We want the ost thread to get this reply so that it can respond
         * to ost requests (write cache writeback) that might be triggered
         * in the callback.
         *
         * But we'd also like to be able to indicate in the reply that we're
         * cancelling right now, because it's unused, or have an intent result
         * in the reply, so we might have to push the responsibility for sending
         * the reply down into the AST handlers, alas. */
        if (req->rq_reqmsg->opc != LDLM_BL_CALLBACK)
                ldlm_callback_reply(req, 0);

        switch (req->rq_reqmsg->opc) {
        case LDLM_BL_CALLBACK:
                CDEBUG(D_INODE, "blocking ast\n");
#ifdef __KERNEL__
                rc = ldlm_bl_to_thread(ldlm, ns, &dlm_req->lock_desc, lock);
                ldlm_callback_reply(req, rc);
#else
                rc = 0;
                ldlm_callback_reply(req, rc);
                ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
#endif
                break;
        case LDLM_CP_CALLBACK:
                CDEBUG(D_INODE, "completion ast\n");
                ldlm_handle_cp_callback(req, ns, dlm_req, lock);
                break;
        default:
                LBUG();                         /* checked above */
        }

        RETURN(0);
}

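/* Request handler for the ldlm_canceld service.  Only LDLM_CANCEL is
 * accepted here; it runs on a separate portal from other lock traffic,
 * presumably so cancels aren't stuck queued behind enqueues. */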
static int ldlm_cancel_handler(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        /* Requests arrive in sender's byte order.  The ptlrpc service
         * handler has already checked and, if necessary, byte-swapped the
         * incoming request message body, but I am responsible for the
         * message buffers. */

        if (req->rq_export == NULL) {
                struct ldlm_request *dlm_req;
                CERROR("operation %d with bad export (ptl req %d/rep %d)\n",
                       req->rq_reqmsg->opc, req->rq_request_portal,
                       req->rq_reply_portal);
                CERROR("--> export cookie: "LPX64"\n",
                       req->rq_reqmsg->handle.cookie);
                dlm_req = lustre_swab_reqbuf(req, 0, sizeof (*dlm_req),
                                             lustre_swab_ldlm_request);
                if (dlm_req != NULL)
                        ldlm_lock_dump_handle(D_ERROR, &dlm_req->lock_handle1);
                RETURN(-ENOTCONN);
        }

        switch (req->rq_reqmsg->opc) {

        /* XXX FIXME move this back to mds/handler.c, bug 249 */
        case LDLM_CANCEL:
                CDEBUG(D_INODE, "cancel\n");
                OBD_FAIL_RETURN(OBD_FAIL_LDLM_CANCEL, 0);
                rc = ldlm_handle_cancel(req);
                if (rc)
                        break;
                RETURN(0);

        default:
                CERROR("invalid opcode %d\n", req->rq_reqmsg->opc);
                RETURN(-EINVAL);
        }

        RETURN(0);
}

#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
        struct ldlm_bl_work_item *blwi = NULL;

        spin_lock(&blp->blp_lock);
        if (!list_empty(&blp->blp_list)) {
                blwi = list_entry(blp->blp_list.next, struct ldlm_bl_work_item,
                                  blwi_entry);
                list_del(&blwi->blwi_entry);
        }
        spin_unlock(&blp->blp_lock);

        return blwi;
}

struct ldlm_bl_thread_data {
        int                     bltd_num;
        struct ldlm_bl_pool     *bltd_blp;
};

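/* Main loop of an ldlm_bl pool thread: block until ldlm_bl_get_work()
 * yields a work item, then run the blocking callback.  A work item whose
 * blwi_ns is NULL is the shutdown sentinel queued by ldlm_cleanup(); it is
 * deliberately not freed here because it lives on the cleaner's stack. */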
static int ldlm_bl_thread_main(void *arg)
{
        struct ldlm_bl_thread_data *bltd = arg;
        struct ldlm_bl_pool *blp = bltd->bltd_blp;
        unsigned long flags;
        ENTRY;

        /* XXX boiler-plate */
        {
                char name[sizeof(current->comm)];
                snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
                         bltd->bltd_num);
                kportal_daemonize(name);
        }
        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        atomic_inc(&blp->blp_num_threads);
        complete(&blp->blp_comp);

        while (1) {
                struct l_wait_info lwi = { 0 };
                struct ldlm_bl_work_item *blwi = NULL;

                l_wait_event_exclusive(blp->blp_waitq,
                                       (blwi = ldlm_bl_get_work(blp)) != NULL,
                                       &lwi);

                if (blwi->blwi_ns == NULL)
                        break;

                ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                        blwi->blwi_lock);
                OBD_FREE(blwi, sizeof(*blwi));
        }

        atomic_dec(&blp->blp_num_threads);
        complete(&blp->blp_comp);
        RETURN(0);
}

#endif

static int ldlm_setup(void);
static int ldlm_cleanup(int force);

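/* The global LDLM state is reference-counted: the first ldlm_get_ref()
 * brings up the services and threads via ldlm_setup(), and the last
 * ldlm_put_ref() tears them down again. */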
int ldlm_get_ref(void)
{
        int rc = 0;
        down(&ldlm_ref_sem);
        if (++ldlm_refcount == 1) {
                rc = ldlm_setup();
                if (rc)
                        ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        RETURN(rc);
}

void ldlm_put_ref(int force)
{
        down(&ldlm_ref_sem);
        if (ldlm_refcount == 1) {
                int rc = ldlm_cleanup(force);
                if (rc)
                        CERROR("ldlm_cleanup failed: %d\n", rc);
                else
                        ldlm_refcount--;
        } else {
                ldlm_refcount--;
        }
        up(&ldlm_ref_sem);

        EXIT;
}

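/* Bring up the global LDLM state: the ldlm_cbd and ldlm_canceld ptlrpc
 * services, the blocking-callback thread pool, the expired-lock thread,
 * and the waiting-locks timer machinery. */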
static int ldlm_setup(void)
{
        struct ldlm_bl_pool *blp;
        int rc = 0;
#ifdef __KERNEL__
        int i;
#endif
        ENTRY;

        if (ldlm != NULL)
                RETURN(-EALREADY);

        OBD_ALLOC(ldlm, sizeof(*ldlm));
        if (ldlm == NULL)
                RETURN(-ENOMEM);

#ifdef __KERNEL__
        rc = ldlm_proc_setup();
        if (rc != 0)
                GOTO(out_free, rc);
#endif

        ldlm->ldlm_cb_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CB_REQUEST_PORTAL,
                                LDLM_CB_REPLY_PORTAL,
                                ldlm_callback_handler, "ldlm_cbd",
                                ldlm_svc_proc_dir);

        if (!ldlm->ldlm_cb_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        ldlm->ldlm_cancel_service =
                ptlrpc_init_svc(LDLM_NEVENTS, LDLM_NBUFS, LDLM_BUFSIZE,
                                LDLM_MAXREQSIZE, LDLM_CANCEL_REQUEST_PORTAL,
                                LDLM_CANCEL_REPLY_PORTAL,
                                ldlm_cancel_handler, "ldlm_canceld",
                                ldlm_svc_proc_dir);

        if (!ldlm->ldlm_cancel_service) {
                CERROR("failed to start service\n");
                GOTO(out_proc, rc = -ENOMEM);
        }

        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out_proc, rc = -ENOMEM);
        ldlm->ldlm_bl_pool = blp;

        atomic_set(&blp->blp_num_threads, 0);
        init_waitqueue_head(&blp->blp_waitq);
        spin_lock_init(&blp->blp_lock);

        INIT_LIST_HEAD(&blp->blp_list);

#ifdef __KERNEL__
        for (i = 0; i < LDLM_NUM_THREADS; i++) {
                struct ldlm_bl_thread_data bltd = {
                        .bltd_num = i,
                        .bltd_blp = blp,
                };
                init_completion(&blp->blp_comp);
                rc = kernel_thread(ldlm_bl_thread_main, &bltd, 0);
                if (rc < 0) {
                        CERROR("cannot start LDLM thread #%d: rc %d\n", i, rc);
                        LBUG();
                        GOTO(out_thread, rc);
                }
                wait_for_completion(&blp->blp_comp);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cancel_service,
                                    LDLM_NUM_THREADS, "ldlm_cn");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        rc = ptlrpc_start_n_threads(NULL, ldlm->ldlm_cb_service,
                                    LDLM_NUM_THREADS, "ldlm_cb");
        if (rc) {
                LBUG();
                GOTO(out_thread, rc);
        }

        INIT_LIST_HEAD(&expired_lock_thread.elt_expired_locks);
        spin_lock_init(&expired_lock_thread.elt_lock);
        expired_lock_thread.elt_state = ELT_STOPPED;
        init_waitqueue_head(&expired_lock_thread.elt_waitq);

        rc = kernel_thread(expired_lock_main, NULL, CLONE_VM | CLONE_FS);
        if (rc < 0) {
                CERROR("Cannot start ldlm expired-lock thread: %d\n", rc);
                GOTO(out_thread, rc);
        }

        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_READY);

        INIT_LIST_HEAD(&waiting_locks_list);
        spin_lock_init(&waiting_locks_spinlock);
        waiting_locks_timer.function = waiting_locks_callback;
        waiting_locks_timer.data = 0;
        init_timer(&waiting_locks_timer);
#endif

        RETURN(0);

#ifdef __KERNEL__
 out_thread:
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
#endif

 out_proc:
#ifdef __KERNEL__
        ldlm_proc_cleanup();
 out_free:
#endif
        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;
        return rc;
}

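/* Tear down the global LDLM state.  All namespaces must be gone first.
 * Each blocking-callback thread is stopped by queueing one stack-allocated
 * sentinel work item (blwi_ns == NULL) and waiting for it to complete;
 * then the services, /proc entries, and the expired-lock thread are shut
 * down in turn. */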
static int ldlm_cleanup(int force)
{
#ifdef __KERNEL__
        struct ldlm_bl_pool *blp = ldlm->ldlm_bl_pool;
#endif
        ENTRY;

        if (!list_empty(&ldlm_namespace_list)) {
                CERROR("ldlm still has namespaces; clean these up first.\n");
                ldlm_dump_all_namespaces();
                RETURN(-EBUSY);
        }

#ifdef __KERNEL__
        while (atomic_read(&blp->blp_num_threads) > 0) {
                struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

                init_completion(&blp->blp_comp);

                spin_lock(&blp->blp_lock);
                list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                wake_up(&blp->blp_waitq);
                spin_unlock(&blp->blp_lock);

                wait_for_completion(&blp->blp_comp);
        }
        OBD_FREE(blp, sizeof(*blp));

        ptlrpc_stop_all_threads(ldlm->ldlm_cb_service);
        ptlrpc_unregister_service(ldlm->ldlm_cb_service);
        ptlrpc_stop_all_threads(ldlm->ldlm_cancel_service);
        ptlrpc_unregister_service(ldlm->ldlm_cancel_service);
        ldlm_proc_cleanup();

        expired_lock_thread.elt_state = ELT_TERMINATE;
        wake_up(&expired_lock_thread.elt_waitq);
        wait_event(expired_lock_thread.elt_waitq,
                   expired_lock_thread.elt_state == ELT_STOPPED);

#endif

        OBD_FREE(ldlm, sizeof(*ldlm));
        ldlm = NULL;

        RETURN(0);
}

int __init ldlm_init(void)
{
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_resource_slab == NULL)
                return -ENOMEM;

        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ldlm_lock_slab == NULL) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }

        l_lock_init(&ldlm_handle_lock);

        return 0;
}

void __exit ldlm_exit(void)
{
        if (ldlm_refcount)
                CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
        if (kmem_cache_destroy(ldlm_resource_slab) != 0)
                CERROR("couldn't free ldlm resource slab\n");
        if (kmem_cache_destroy(ldlm_lock_slab) != 0)
                CERROR("couldn't free ldlm lock slab\n");
}

/* ldlm_flock.c */
EXPORT_SYMBOL(ldlm_flock_completion_ast);

/* ldlm_lock.c */
EXPORT_SYMBOL(ldlm_lock2desc);
EXPORT_SYMBOL(ldlm_register_intent);
EXPORT_SYMBOL(ldlm_unregister_intent);
EXPORT_SYMBOL(ldlm_lockname);
EXPORT_SYMBOL(ldlm_typename);
EXPORT_SYMBOL(ldlm_lock2handle);
EXPORT_SYMBOL(__ldlm_handle2lock);
EXPORT_SYMBOL(ldlm_lock_put);
EXPORT_SYMBOL(ldlm_lock_match);
EXPORT_SYMBOL(ldlm_lock_cancel);
EXPORT_SYMBOL(ldlm_lock_addref);
EXPORT_SYMBOL(ldlm_lock_decref);
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
EXPORT_SYMBOL(ldlm_lock_change_resource);
EXPORT_SYMBOL(ldlm_lock_set_data);
EXPORT_SYMBOL(ldlm_it2str);
EXPORT_SYMBOL(ldlm_lock_dump);
EXPORT_SYMBOL(ldlm_lock_dump_handle);
EXPORT_SYMBOL(ldlm_cancel_locks_for_export);
EXPORT_SYMBOL(ldlm_reprocess_all_ns);

/* ldlm_request.c */
EXPORT_SYMBOL(ldlm_completion_ast);
EXPORT_SYMBOL(ldlm_expired_completion_wait);
EXPORT_SYMBOL(ldlm_cli_convert);
EXPORT_SYMBOL(ldlm_cli_enqueue);
EXPORT_SYMBOL(ldlm_cli_cancel);
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
EXPORT_SYMBOL(ldlm_replay_locks);
EXPORT_SYMBOL(ldlm_resource_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach);
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_change_cbdata);

/* ldlm_lockd.c */
EXPORT_SYMBOL(ldlm_server_blocking_ast);
EXPORT_SYMBOL(ldlm_server_completion_ast);
EXPORT_SYMBOL(ldlm_handle_enqueue);
EXPORT_SYMBOL(ldlm_handle_cancel);
EXPORT_SYMBOL(ldlm_handle_convert);
EXPORT_SYMBOL(ldlm_del_waiting_lock);
EXPORT_SYMBOL(ldlm_get_ref);
EXPORT_SYMBOL(ldlm_put_ref);

#if 0
/* ldlm_test.c */
EXPORT_SYMBOL(ldlm_test);
EXPORT_SYMBOL(ldlm_regression_start);
EXPORT_SYMBOL(ldlm_regression_stop);
#endif

/* ldlm_resource.c */
EXPORT_SYMBOL(ldlm_namespace_new);
EXPORT_SYMBOL(ldlm_namespace_cleanup);
EXPORT_SYMBOL(ldlm_namespace_free);

/* l_lock.c */
EXPORT_SYMBOL(l_lock);
EXPORT_SYMBOL(l_unlock);

/* ldlm_lib.c */
EXPORT_SYMBOL(client_obd_setup);
EXPORT_SYMBOL(client_obd_cleanup);
EXPORT_SYMBOL(client_connect_import);
EXPORT_SYMBOL(client_disconnect_export);
EXPORT_SYMBOL(target_abort_recovery);
EXPORT_SYMBOL(target_handle_connect);
EXPORT_SYMBOL(target_destroy_export);
EXPORT_SYMBOL(target_cancel_recovery_timer);
EXPORT_SYMBOL(target_send_reply);
EXPORT_SYMBOL(target_queue_recovery_request);
EXPORT_SYMBOL(target_handle_ping);
EXPORT_SYMBOL(target_handle_disconnect);
EXPORT_SYMBOL(target_queue_final_reply);
EXPORT_SYMBOL(ldlm_put_lock_into_req);