fs/lustre-release.git: lustre/ldlm/ldlm_request.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <linux/lustre_dlm.h>
#include <linux/obd_class.h>
#include <linux/obd.h>

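/* Callbacks for the interruptible, timed wait in ldlm_completion_ast() below:
 * interrupted_completion_wait() fires when the sleep is interrupted by a
 * signal; expired_completion_wait() fires when obd_timeout expires, in which
 * case the import's connection is signalled as failed. */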
static int interrupted_completion_wait(void *data)
{
        RETURN(1);
}

static int expired_completion_wait(void *data)
{
        struct ldlm_lock *lock = data;
        struct ptlrpc_connection *conn;
        struct obd_device *obd;

        if (!lock)
                CERROR("NULL lock\n");
        else if (!lock->l_connh)
                CERROR("lock %p has NULL connh\n", lock);
        else if (!(obd = class_conn2obd(lock->l_connh)))
                CERROR("lock %p has NULL obd\n", lock);
        else if (!(conn = obd->u.cli.cl_import.imp_connection))
                CERROR("lock %p has NULL connection\n", lock);
        else
                class_signal_connection_failure(conn);
        RETURN(0);
}

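/* Client-side completion AST: sleep until the lock is granted
 * (l_req_mode == l_granted_mode) or destroyed, using an interruptible wait
 * with an obd_timeout-second timeout.  flags == 0 just wakes existing
 * waiters; LDLM_FL_WAIT_NOREPROC skips the resource reprocessing before
 * sleeping.  Returns 0 when granted, -EIO if the lock was destroyed, or the
 * wait error otherwise. */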
int ldlm_completion_ast(struct ldlm_lock *lock, int flags)
{
        struct l_wait_info lwi =
                LWI_TIMEOUT_INTR(obd_timeout * HZ, expired_completion_wait,
                                 interrupted_completion_wait, lock);
        int rc = 0;
        ENTRY;

        if (flags == LDLM_FL_WAIT_NOREPROC)
                goto noreproc;

        if (flags == 0) {
                wake_up(&lock->l_waitq);
                RETURN(0);
        }

        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV)))
                RETURN(0);

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                   "sleeping");
        ldlm_lock_dump(lock);
        ldlm_reprocess_all(lock->l_resource);

 noreproc:
        /* Go to sleep until the lock is granted or cancelled. */
        rc = l_wait_event(lock->l_waitq,
                          ((lock->l_req_mode == lock->l_granted_mode) ||
                           (lock->l_flags & LDLM_FL_DESTROYED)), &lwi);

        if (lock->l_flags & LDLM_FL_DESTROYED) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(-EIO);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                RETURN(rc);
        }

        LDLM_DEBUG(lock, "client-side enqueue waking up: granted");
        RETURN(0);
}

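/* Enqueue a lock entirely on this node, without any RPC.  Only legal on a
 * server namespace (LBUGs on a client one): creates the lock, takes a mode
 * reference, runs ldlm_lock_enqueue() and then the completion AST, and
 * returns the new lock's handle through 'lockh'. */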
static int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                                  struct lustre_handle *parent_lockh,
                                  __u64 *res_id,
                                  __u32 type,
                                  void *cookie, int cookielen,
                                  ldlm_mode_t mode,
                                  int *flags,
                                  ldlm_completion_callback completion,
                                  ldlm_blocking_callback blocking,
                                  void *data,
                                  __u32 data_len,
                                  struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        int err;
        ENTRY;

        if (ns->ns_client) {
                CERROR("Trying to enqueue local lock in a shadow namespace\n");
                LBUG();
        }

        lock = ldlm_lock_create(ns, parent_lockh, res_id, type, mode, data,
                                data_len);
        if (!lock)
                GOTO(out_nolock, err = -ENOMEM);
        LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");

        ldlm_lock_addref_internal(lock, mode);
        ldlm_lock2handle(lock, lockh);
        lock->l_connh = NULL;

        err = ldlm_lock_enqueue(lock, cookie, cookielen, flags, completion,
                                blocking);
        if (err != ELDLM_OK)
                GOTO(out, err);

        if (type == LDLM_EXTENT)
                memcpy(cookie, &lock->l_extent, sizeof(lock->l_extent));
        if ((*flags) & LDLM_FL_LOCK_CHANGED)
                memcpy(res_id, lock->l_resource->lr_name, sizeof(*res_id));

        LDLM_DEBUG_NOLOCK("client-side local enqueue handler END (lock %p)",
                          lock);

        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, *flags);

        LDLM_DEBUG(lock, "client-side local enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return err;
}

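/* Main client-side enqueue path.  With a NULL connection handle this reduces
 * to ldlm_cli_enqueue_local().  Otherwise it builds (or reuses) an
 * LDLM_ENQUEUE request, sends it, and updates the local lock from the reply:
 * remote handle, granted extent, and a possibly changed mode or resource when
 * the server completed an intent.  LDLM_FL_REPLAY re-sends an existing lock
 * instead of creating a new one. */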
int ldlm_cli_enqueue(struct lustre_handle *connh,
                     struct ptlrpc_request *req,
                     struct ldlm_namespace *ns,
                     struct lustre_handle *parent_lock_handle,
                     __u64 *res_id,
                     __u32 type,
                     void *cookie, int cookielen,
                     ldlm_mode_t mode,
                     int *flags,
                     ldlm_completion_callback completion,
                     ldlm_blocking_callback blocking,
                     void *data,
                     __u32 data_len,
                     struct lustre_handle *lockh)
{
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        struct ldlm_reply *reply;
        int rc, size = sizeof(*body), req_passed_in = 1, is_replay;
        ENTRY;

        is_replay = *flags & LDLM_FL_REPLAY;
        LASSERT(connh != NULL || !is_replay);

        if (connh == NULL)
                return ldlm_cli_enqueue_local(ns, parent_lock_handle, res_id,
                                              type, cookie, cookielen, mode,
                                              flags, completion, blocking, data,
                                              data_len, lockh);

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock(lockh);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(connh == lock->l_connh);
        } else {
                lock = ldlm_lock_create(ns, parent_lock_handle, res_id, type,
                                        mode, data, data_len);
                if (lock == NULL)
                        GOTO(out_nolock, rc = -ENOMEM);
                LDLM_DEBUG(lock, "client-side enqueue START");
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, mode);
                ldlm_lock2handle(lock, lockh);
                if (type == LDLM_EXTENT)
                        memcpy(&lock->l_extent, cookie,
                               sizeof(body->lock_desc.l_extent));
        }

        if (req == NULL) {
                req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_ENQUEUE, 1,
                                      &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);
                req_passed_in = 0;
        } else if (req->rq_reqmsg->buflens[0] != sizeof(*body))
                LBUG();

        /* Dump lock data into the request buffer */
        body = lustre_msg_buf(req->rq_reqmsg, 0);
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = *flags;

        memcpy(&body->lock_handle1, lockh, sizeof(*lockh));
        if (parent_lock_handle)
                memcpy(&body->lock_handle2, parent_lock_handle,
                       sizeof(body->lock_handle2));

        /* Continue as normal. */
        if (!req_passed_in) {
                size = sizeof(*reply);
                req->rq_replen = lustre_msg_size(1, &size);
        }
        lock->l_connh = connh;
        lock->l_export = NULL;

        LDLM_DEBUG(lock, "sending request");
        rc = ptlrpc_queue_wait(req);
        rc = ptlrpc_check_status(req, rc);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
                ldlm_lock_decref(lockh, mode);
                /* FIXME: if we've already received a completion AST, this will
                 * LBUG! */
                ldlm_lock_destroy(lock);
                GOTO(out, rc);
        }

        reply = lustre_msg_buf(req->rq_repmsg, 0);
        memcpy(&lock->l_remote_handle, &reply->lock_handle,
               sizeof(lock->l_remote_handle));
        *flags = reply->lock_flags;

        CDEBUG(D_INFO, "local: %p, remote: %p, flags: %d\n", lock,
               (void *)(unsigned long)reply->lock_handle.addr, *flags);
        if (type == LDLM_EXTENT) {
                CDEBUG(D_INFO, "requested extent: "LPU64" -> "LPU64", got "
                       "extent "LPU64" -> "LPU64"\n",
                       body->lock_desc.l_extent.start,
                       body->lock_desc.l_extent.end,
                       reply->lock_extent.start, reply->lock_extent.end);
                cookie = &reply->lock_extent; /* FIXME bug 267 */
                cookielen = sizeof(reply->lock_extent);
        }

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again. */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_mode;
                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (reply->lock_resource_name[0] !=
                    lock->l_resource->lr_name[0]) {
                        CDEBUG(D_INFO, "remote intent success, locking %ld "
                               "instead of %ld\n",
                               (long)reply->lock_resource_name[0],
                               (long)lock->l_resource->lr_name[0]);

                        ldlm_lock_change_resource(lock,
                                                  reply->lock_resource_name);
                        if (lock->l_resource == NULL) {
                                LBUG();
                                RETURN(-ENOMEM);
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(lock, cookie, cookielen, flags,
                                       completion, blocking);
                if (lock->l_completion_ast)
                        lock->l_completion_ast(lock, *flags);
        }

        if (!req_passed_in)
                ptlrpc_req_finished(req);

        LDLM_DEBUG(lock, "client-side enqueue END");
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
 out_nolock:
        return rc;
}

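/* Look for an already-granted compatible lock first and only fall back to a
 * full ldlm_cli_enqueue() if the match fails.  On a successful match the
 * handle is filled in by ldlm_lock_match() and ELDLM_OK is returned without
 * sending any RPC. */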
int ldlm_match_or_enqueue(struct lustre_handle *connh,
                          struct ptlrpc_request *req,
                          struct ldlm_namespace *ns,
                          struct lustre_handle *parent_lock_handle,
                          __u64 *res_id,
                          __u32 type,
                          void *cookie, int cookielen,
                          ldlm_mode_t mode,
                          int *flags,
                          ldlm_completion_callback completion,
                          ldlm_blocking_callback blocking,
                          void *data,
                          __u32 data_len,
                          struct lustre_handle *lockh)
{
        int rc;
        ENTRY;
        rc = ldlm_lock_match(ns, res_id, type, cookie, cookielen, mode, lockh);
        if (rc == 0) {
                rc = ldlm_cli_enqueue(connh, req, ns,
                                      parent_lock_handle, res_id, type, cookie,
                                      cookielen, mode, flags, completion,
                                      blocking, data, data_len, lockh);
                if (rc != ELDLM_OK)
                        CERROR("ldlm_cli_enqueue: err: %d\n", rc);
                RETURN(rc);
        } else
                RETURN(0);
}

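/* Re-send the enqueue for an existing lock via the LDLM_FL_REPLAY path of
 * ldlm_cli_enqueue(); most of the other arguments are unused on that path. */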
int ldlm_cli_replay_enqueue(struct ldlm_lock *lock)
{
        struct lustre_handle lockh;
        int flags = LDLM_FL_REPLAY;
        ldlm_lock2handle(lock, &lockh);
        return ldlm_cli_enqueue(lock->l_connh, NULL, NULL, NULL, NULL,
                                lock->l_resource->lr_type, NULL, 0, -1, &flags,
                                NULL, NULL, NULL, 0, &lockh);
}

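/* Convert the mode of a lock held on this node without an RPC; server
 * namespaces only.  The resource is reprocessed afterwards so that waiters
 * unblocked by the conversion can be granted. */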
static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
                                  int *flags)
{
        ENTRY;
        if (lock->l_resource->lr_namespace->ns_client) {
                CERROR("Trying to convert local lock\n");
                LBUG();
        }
        LDLM_DEBUG(lock, "client-side local convert");

        ldlm_lock_convert(lock, new_mode, flags);
        ldlm_reprocess_all(lock->l_resource);

        LDLM_DEBUG(lock, "client-side local convert handler END");
        LDLM_LOCK_PUT(lock);
        RETURN(0);
}

/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
 * conversion of locks which are on the waiting or converting queue */
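/* Convert a lock to 'new_mode'.  Locks with no connection handle are handled
 * by ldlm_cli_convert_local() above; otherwise an LDLM_CONVERT request is
 * sent and the local lock is converted using the flags from the reply. */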
int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, int *flags)
{
        struct ldlm_request *body;
        struct lustre_handle *connh;
        struct ldlm_reply *reply;
        struct ldlm_lock *lock;
        struct ldlm_resource *res;
        struct ptlrpc_request *req;
        int rc, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                LBUG();
                RETURN(-EINVAL);
        }
        *flags = 0;
        connh = lock->l_connh;

        if (!connh)
                RETURN(ldlm_cli_convert_local(lock, new_mode, flags));

        LDLM_DEBUG(lock, "client-side convert");

        req = ptlrpc_prep_req(class_conn2cliimp(connh), LDLM_CONVERT, 1, &size,
                              NULL);
        if (!req)
                GOTO(out, rc = -ENOMEM);

        body = lustre_msg_buf(req->rq_reqmsg, 0);
        memcpy(&body->lock_handle1, &lock->l_remote_handle,
               sizeof(body->lock_handle1));

        body->lock_desc.l_req_mode = new_mode;
        body->lock_flags = *flags;

        size = sizeof(*reply);
        req->rq_replen = lustre_msg_size(1, &size);

        rc = ptlrpc_queue_wait(req);
        rc = ptlrpc_check_status(req, rc);
        if (rc != ELDLM_OK)
                GOTO(out, rc);

        reply = lustre_msg_buf(req->rq_repmsg, 0);
        res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
        if (res != NULL)
                ldlm_reprocess_all(res);
        /* Go to sleep until the lock is granted. */
        /* FIXME: or cancelled. */
        if (lock->l_completion_ast)
                lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC);
        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        ptlrpc_req_finished(req);
        return rc;
}

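/* Cancel the lock named by 'lockh'.  For a remote lock the cancel callback
 * runs locally, an LDLM_CANCEL request tells the server to drop its copy, and
 * the local lock is then cancelled; a purely local lock is cancelled and its
 * resource reprocessed with no RPC.  A stale handle is silently ignored (see
 * the comment below). */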
int ldlm_cli_cancel(struct lustre_handle *lockh)
{
        struct ptlrpc_request *req;
        struct ldlm_lock *lock;
        struct ldlm_request *body;
        int rc = 0, size = sizeof(*body);
        ENTRY;

        lock = ldlm_handle2lock(lockh);
        if (!lock) {
                /* It's possible that the decref that we did just before this
                 * cancel was the last reader/writer, and caused a cancel before
                 * we could call this function.  If we want to make this
                 * impossible (by adding a dec_and_cancel() or similar), then
                 * we can put the LBUG back. */
                //LBUG();
                RETURN(0);
        }

        if (lock->l_connh) {
                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references */
                l_lock(&lock->l_resource->lr_namespace->ns_lock);
                lock->l_flags |= LDLM_FL_CBPENDING;
                ldlm_cancel_callback(lock);
                l_unlock(&lock->l_resource->lr_namespace->ns_lock);

                req = ptlrpc_prep_req(class_conn2cliimp(lock->l_connh),
                                      LDLM_CANCEL, 1, &size, NULL);
                if (!req)
                        GOTO(out, rc = -ENOMEM);

                /* XXX FIXME bug 249 */
                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;

                body = lustre_msg_buf(req->rq_reqmsg, 0);
                memcpy(&body->lock_handle1, &lock->l_remote_handle,
                       sizeof(body->lock_handle1));

                req->rq_replen = lustre_msg_size(0, NULL);

                rc = ptlrpc_queue_wait(req);
                rc = ptlrpc_check_status(req, rc);
                ptlrpc_req_finished(req);
                if (rc != ELDLM_OK)
                        GOTO(out, rc);

                ldlm_lock_cancel(lock);
        } else {
                LDLM_DEBUG(lock, "client-side local cancel");
                if (lock->l_resource->lr_namespace->ns_client) {
                        CERROR("Trying to cancel local lock\n");
                        LBUG();
                }
                ldlm_lock_cancel(lock);
                ldlm_reprocess_all(lock->l_resource);
                LDLM_DEBUG(lock, "client-side local cancel handler END");
        }

        EXIT;
 out:
        LDLM_LOCK_PUT(lock);
        return rc;
}

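/* Trim the namespace's unused-lock (LRU) list down to ns_max_unused by
 * cancelling the excess locks.  Candidates are collected under the namespace
 * lock; the ldlm_cli_cancel() calls themselves are made after it is dropped. */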
int ldlm_cancel_lru(struct ldlm_namespace *ns)
{
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        int count, rc = 0;
        struct ldlm_ast_work *w;
        ENTRY;

        l_lock(&ns->ns_lock);
        count = ns->ns_nr_unused - ns->ns_max_unused;

        if (count <= 0) {
                l_unlock(&ns->ns_lock);
                RETURN(0);
        }

        list_for_each_safe(tmp, next, &ns->ns_unused_list) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_lru);

                LASSERT(!lock->l_readers && !lock->l_writers);

                /* Setting the CBPENDING flag is a little misleading, but
                 * prevents an important race; namely, once CBPENDING is set,
                 * the lock can accumulate no more readers/writers.  Since
                 * readers and writers are already zero here, ldlm_lock_decref
                 * won't see this flag and call l_blocking_ast */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);
                list_add(&w->w_list, &list);
                list_del_init(&lock->l_lru);

                if (--count == 0)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                ldlm_lock2handle(w->w_lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc != ELDLM_OK)
                        CDEBUG(D_INFO, "ldlm_cli_cancel: %d\n", rc);

                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        RETURN(rc);
}

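/* Cancel the granted locks on one resource that have no readers or writers.
 * LDLM_FL_LOCAL_ONLY skips the cancel RPC, LDLM_FL_NO_CALLBACK suppresses the
 * cancel callback, and LDLM_FL_REDUCE stops as soon as the namespace is back
 * under its ns_max_unused limit. */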
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                    __u64 *res_id, int flags)
{
        struct ldlm_resource *res;
        struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
        struct ldlm_ast_work *w;
        ENTRY;

        if ((flags & LDLM_FL_REDUCE) &&
            ns->ns_max_unused > ns->ns_nr_unused)
                RETURN(0);

        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
        if (res == NULL) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource "LPU64"\n", res_id[0]);
                RETURN(0);
        }

        l_lock(&ns->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock->l_readers || lock->l_writers)
                        continue;

                /* See CBPENDING comment in ldlm_cancel_lru */
                lock->l_flags |= LDLM_FL_CBPENDING;

                OBD_ALLOC(w, sizeof(*w));
                LASSERT(w);

                w->w_lock = LDLM_LOCK_GET(lock);
                list_add(&w->w_list, &list);
                if ((flags & LDLM_FL_REDUCE) &&
                    ns->ns_max_unused > ns->ns_nr_unused)
                        break;
        }
        l_unlock(&ns->ns_lock);

        list_for_each_safe(tmp, next, &list) {
                struct lustre_handle lockh;
                int rc;
                w = list_entry(tmp, struct ldlm_ast_work, w_list);

                /* Prevent the cancel callback from being called by setting
                 * LDLM_FL_CANCEL in the lock.  Very sneaky. -p */
                if (flags & LDLM_FL_NO_CALLBACK)
                        w->w_lock->l_flags |= LDLM_FL_CANCEL;

                if (flags & LDLM_FL_LOCAL_ONLY) {
                        ldlm_lock_cancel(w->w_lock);
                } else {
                        ldlm_lock2handle(w->w_lock, &lockh);
                        rc = ldlm_cli_cancel(&lockh);
                        if (rc != ELDLM_OK)
                                CERROR("ldlm_cli_cancel: %d\n", rc);
                }
                list_del(&w->w_list);
                LDLM_LOCK_PUT(w->w_lock);
                OBD_FREE(w, sizeof(*w));
        }

        ldlm_resource_put(res);

        RETURN(0);
}

/* Cancel all locks on a namespace (or a specific resource, if given) that have
 * 0 readers/writers.
 *
 * If LDLM_FL_LOCAL_ONLY is set in 'flags', throw the locks away without
 * trying to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id,
                           int flags)
{
        int i;

        if (res_id)
                RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, flags));

        l_lock(&ns->ns_lock);
        for (i = 0; i < RES_HASH_SIZE; i++) {
                struct list_head *tmp, *pos;
                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
                        int rc;
                        struct ldlm_resource *res;
                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
                        ldlm_resource_getref(res);

                        rc = ldlm_cli_cancel_unused_resource(ns, res->lr_name,
                                                             flags);

                        if (rc)
                                CERROR("cancel_unused_res ("LPU64"): %d\n",
                                       res->lr_name[0], rc);
                        ldlm_resource_put(res);
                }
        }
        l_unlock(&ns->ns_lock);

        return ELDLM_OK;
}