1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
38 * lustre/llite/llite_capa.c
40 * Author: Lai Siyao <lsy@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_LLITE
46 #include <linux/version.h>
47 #include <asm/uaccess.h>
48 #include <linux/file.h>
49 #include <linux/kmod.h>
51 #include <lustre_lite.h>
52 #include "llite_internal.h"
54 /* for obd_capa.c_list, client capa might stay in three places:
57  * 3. stand alone: just allocated.
/* NOTE(review): items 1 and 2 of the enumeration above are missing from
 * this extract; presumably they are ll_capa_list and ll_idle_capas --
 * confirm against the full file. */
60 /* capas for oss writeback and those failed to renew */
61 static CFS_LIST_HEAD(ll_idle_capas);
/* control block for the capa renewal kernel thread (capa_thread_main) */
62 static struct ptlrpc_thread ll_capa_thread;
/* client-site capa list, shared with the generic capa cache */
63 static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
65 /* llite capa renewal timer */
66 struct timer_list ll_capa_timer;
67 /* for debug: indicate whether capa on llite is enabled or not */
68 static cfs_atomic_t ll_capa_debug = CFS_ATOMIC_INIT(0);
/* renewal statistics; dumped by ll_print_capa_stat().  NOTE(review):
 * updates appear to happen under capa_lock -- confirm in full source. */
69 static unsigned long long ll_capa_renewed = 0;
70 static unsigned long long ll_capa_renewal_noent = 0;
71 static unsigned long long ll_capa_renewal_failed = 0;
72 static unsigned long long ll_capa_renewal_retries = 0;
/*
 * Arm (or advance) the global ll_capa_timer so it fires no later than
 * @expiry.  The timer is only modified when @expiry is earlier than the
 * currently programmed expiry, or when the timer is not pending at all,
 * so it always reflects the soonest deadline among all tracked capas.
 * Called with capa_lock held by all visible callers in this file.
 */
74 static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
76 if (cfs_time_before(expiry, ll_capa_timer.expires) ||
77 !timer_pending(&ll_capa_timer)) {
78 mod_timer(&ll_capa_timer, expiry);
79 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
80 "ll_capa_timer update: %lu/%lu by", expiry, jiffies);
84 static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
86 return cfs_time_sub(ocapa->c_expiry,
87 cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
90 static inline int capa_is_to_expire(struct obd_capa *ocapa)
92 return cfs_time_beforeq(capa_renewal_time(ocapa), cfs_time_current());
/*
 * Wakeup predicate for the capa thread: returns nonzero when either the
 * head of ll_capa_list is due for renewal or the head of ll_idle_capas
 * has actually expired.  As a side effect it re-arms ll_capa_timer for
 * the head capa of whichever list was examined.
 * NOTE(review): the declaration of "expired", some braces and the final
 * return are missing from this extract; code left untouched.
 */
95 static inline int have_expired_capa(void)
97 struct obd_capa *ocapa = NULL;
100 /* if ll_capa_list has client capa to expire or ll_idle_capas has
101 * expired capa, return 1.
103 cfs_spin_lock(&capa_lock);
104 if (!cfs_list_empty(ll_capa_list)) {
/* both lists are kept sorted by expiry (see sort_add_capa()), so only
 * the first entry needs to be checked */
105 ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
107 expired = capa_is_to_expire(ocapa);
109 update_capa_timer(ocapa, capa_renewal_time(ocapa));
110 } else if (!cfs_list_empty(&ll_idle_capas)) {
/* idle capas are not renewed; they only need to be released once the
 * real expiry time has passed */
111 ocapa = cfs_list_entry(ll_idle_capas.next, struct obd_capa,
113 expired = capa_is_expired(ocapa);
115 update_capa_timer(ocapa, ocapa->c_expiry);
117 cfs_spin_unlock(&capa_lock);
120 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
/*
 * Insert @ocapa into @head keeping the list sorted by ascending
 * c_expiry.  Walks the list backwards and links @ocapa after the last
 * entry whose expiry is not later than @ocapa's; if no such entry
 * exists, @ocapa becomes the new list head.
 * Caller must hold capa_lock.
 */
124 static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
126 struct obd_capa *tmp;
127 cfs_list_t *before = NULL;
129 /* TODO: client capa is sorted by expiry, this could be optimized */
130 cfs_list_for_each_entry_reverse(tmp, head, c_list) {
131 if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
132 before = &tmp->c_list;
/* must not insert a node after itself */
137 LASSERT(&ocapa->c_list != before);
138 cfs_list_add(&ocapa->c_list, before ?: head);
141 static inline int obd_capa_open_count(struct obd_capa *oc)
143 struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
144 return cfs_atomic_read(&lli->lli_open_count);
/*
 * Unlink a client capability from all per-inode and global bookkeeping
 * and drop the allocation reference.  For an MDS capa, clears the
 * inode's lli_mds_capa pointer; for an OSS capa, removes it from the
 * inode's lli_oss_capas list.  Also removes it from the global c_list
 * and decrements the client-site capa counter.
 * Caller must hold capa_lock.
 */
147 static void ll_delete_capa(struct obd_capa *ocapa)
149 struct ll_inode_info *lli = ll_i2info(ocapa->u.cli.inode);
151 if (capa_for_mds(&ocapa->c_capa)) {
152 LASSERT(lli->lli_mds_capa == ocapa);
153 lli->lli_mds_capa = NULL;
154 } else if (capa_for_oss(&ocapa->c_capa)) {
155 cfs_list_del_init(&ocapa->u.cli.lli_list);
158 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
159 cfs_list_del_init(&ocapa->c_list);
160 capa_count[CAPA_SITE_CLIENT]--;
161 /* release the ref when alloc */
/* NOTE(review): the capa_put()/free call following the comment above is
 * missing from this extract. */
165 /* three places where client capa is deleted:
166 * 1. capa_thread_main(), main place to delete expired capa.
167 * 2. ll_clear_inode_capas() in ll_clear_inode().
168 * 3. ll_truncate_free_capa() delete truncate capa explicitly in ll_truncate().
/*
 * Body of the "ll_capa" kernel thread.
 *
 * Loop structure (per wakeup, driven by ll_capa_timer_callback() or
 * shutdown):
 *   1. Under capa_lock, walk ll_capa_list.  Capas that should no longer
 *      be renewed (MDS capa on a non-dir inode with no open handles and
 *      no LOOKUP lock; OSS capa with no open handles) are moved to
 *      ll_idle_capas.  For the rest, capa_lock is dropped around the
 *      md_renew_capa() RPC and re-taken afterwards.
 *   2. Walk ll_idle_capas and release expired capas via ll_delete_capa(),
 *      except those still referenced (c_refc > 1), which are only
 *      unlinked from the renewal list.
 *
 * NOTE(review): this extract is missing several physical lines,
 * including the outer while-loop header, declarations of "rc"/"ibits",
 * braces and "continue"/"break" statements; code left untouched.
 */
170 static int capa_thread_main(void *unused)
172 struct obd_capa *ocapa, *tmp, *next;
173 struct inode *inode = NULL;
174 struct l_wait_info lwi = { 0 };
178 cfs_daemonize("ll_capa");
/* tell ll_capa_thread_start() we are up and running */
180 thread_set_flags(&ll_capa_thread, SVC_RUNNING);
181 cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
/* sleep until shutdown is requested or some capa is due (see
 * have_expired_capa()) */
184 l_wait_event(ll_capa_thread.t_ctl_waitq,
185 !thread_is_running(&ll_capa_thread) ||
189 if (!thread_is_running(&ll_capa_thread))
194 cfs_spin_lock(&capa_lock);
195 cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
/* truncate capas are never put on the renewal list (see ll_add_capa()) */
198 LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
/* list is sorted by expiry, so the first not-yet-due capa ends the scan */
200 if (!capa_is_to_expire(ocapa)) {
205 cfs_list_del_init(&ocapa->c_list);
207 /* for MDS capability, only renew those which belong to
208 * dir, or its inode is opened, or client holds LOOKUP
211 /* ibits may be changed by ll_have_md_lock() so we have
212 * to set it each time */
213 ibits = MDS_INODELOCK_LOOKUP;
214 if (capa_for_mds(&ocapa->c_capa) &&
215 !S_ISDIR(ocapa->u.cli.inode->i_mode) &&
216 obd_capa_open_count(ocapa) == 0 &&
217 !ll_have_md_lock(ocapa->u.cli.inode,
218 &ibits, LCK_MINMODE)) {
219 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
221 sort_add_capa(ocapa, &ll_idle_capas);
225 /* for OSS capability, only renew those whose inode is
228 if (capa_for_oss(&ocapa->c_capa) &&
229 obd_capa_open_count(ocapa) == 0) {
230 /* oss capa with open count == 0 won't renew,
231 * move to idle list */
232 sort_add_capa(ocapa, &ll_idle_capas);
/* pin the inode across the renewal RPC; the matching iput() happens in
 * the RPC completion path */
236 /* NB iput() is in ll_update_capa() */
237 inode = igrab(ocapa->u.cli.inode);
239 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
/* drop the spinlock while the (blocking) renewal RPC is in flight */
246 cfs_spin_unlock(&capa_lock);
247 rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
249 cfs_spin_lock(&capa_lock);
251 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
252 "renew failed: %d", rc);
253 ll_capa_renewal_failed++;
258 update_capa_timer(next, capa_renewal_time(next));
/* second pass: release idle capas whose real expiry has passed */
260 cfs_list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
262 if (!capa_is_expired(ocapa)) {
264 update_capa_timer(ocapa,
/* still referenced elsewhere: stop tracking it but let the last holder
 * drop the final reference */
269 if (cfs_atomic_read(&ocapa->c_refc) > 1) {
270 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
271 "expired(c_refc %d), don't release",
272 cfs_atomic_read(&ocapa->c_refc));
273 /* don't try to renew any more */
274 cfs_list_del_init(&ocapa->c_list);
278 /* expired capa is released. */
279 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "release expired");
280 ll_delete_capa(ocapa);
283 cfs_spin_unlock(&capa_lock);
/* acknowledge shutdown to ll_capa_thread_stop() */
286 thread_set_flags(&ll_capa_thread, SVC_STOPPED);
287 cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
291 void ll_capa_timer_callback(unsigned long unused)
293 cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
/*
 * Create the "ll_capa" renewal thread and wait until it has signalled
 * SVC_RUNNING before returning.  Returns 0 on success or a negative
 * errno if thread creation fails.
 * NOTE(review): the "rc" declaration, error-path return and final
 * return are missing from this extract; code left untouched.
 */
296 int ll_capa_thread_start(void)
301 cfs_waitq_init(&ll_capa_thread.t_ctl_waitq);
303 rc = cfs_create_thread(capa_thread_main, NULL, 0);
305 CERROR("cannot start expired capa thread: rc %d\n", rc);
/* synchronize with capa_thread_main() setting SVC_RUNNING */
308 cfs_wait_event(ll_capa_thread.t_ctl_waitq,
309 thread_is_running(&ll_capa_thread));
/*
 * Request shutdown of the capa renewal thread and block until it has
 * acknowledged by setting SVC_STOPPED (see end of capa_thread_main()).
 */
314 void ll_capa_thread_stop(void)
316 thread_set_flags(&ll_capa_thread, SVC_STOPPING);
/* wake the thread out of l_wait_event() so it notices SVC_STOPPING */
317 cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
318 cfs_wait_event(ll_capa_thread.t_ctl_waitq,
319 thread_is_stopped(&ll_capa_thread));
/*
 * Find an OSS capability of @inode that authorizes operation @opc
 * (exactly one of CAPA_OPC_OSS_WRITE, CAPA_OPC_OSS_RW or
 * CAPA_OPC_OSS_TRUNC, per the LASSERT below).  Scans the inode's
 * lli_oss_capas list under capa_lock, skipping expired entries.
 * Returns NULL immediately when the mount has no LL_SBI_OSS_CAPA flag.
 * NOTE(review): the return statements and the capa_get() reference
 * acquisition are missing from this extract; presumably the found capa
 * is returned with an elevated refcount -- confirm in full source.
 */
322 struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
324 struct ll_inode_info *lli = ll_i2info(inode);
325 struct obd_capa *ocapa;
330 if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
333 LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
334 opc == CAPA_OPC_OSS_TRUNC);
336 cfs_spin_lock(&capa_lock);
337 cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
338 if (capa_is_expired(ocapa))
/* match the requested operation against what the capa supports */
340 if ((opc & CAPA_OPC_OSS_WRITE) &&
341 capa_opc_supported(&ocapa->c_capa, CAPA_OPC_OSS_WRITE)) {
344 } else if ((opc & CAPA_OPC_OSS_READ) &&
345 capa_opc_supported(&ocapa->c_capa,
346 CAPA_OPC_OSS_READ)) {
349 } else if ((opc & CAPA_OPC_OSS_TRUNC) &&
350 capa_opc_supported(&ocapa->c_capa, opc)) {
/* sanity: capa must belong to this inode and to the client site */
357 LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
358 ll_inode2fid(inode)));
359 LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);
363 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
/* complain only once per miss; ll_add_capa() re-enables the flag */
367 if (cfs_atomic_read(&ll_capa_debug)) {
368 CERROR("no capability for "DFID" opc "LPX64"\n",
369 PFID(&lli->lli_fid), opc);
370 cfs_atomic_set(&ll_capa_debug, 0);
373 cfs_spin_unlock(&capa_lock);
/*
 * Return a referenced copy of @inode's MDS capability via capa_get(),
 * or NULL when the mount has no LL_SBI_MDS_CAPA flag or the inode has
 * no MDS capa cached.  Logs (once, via the ll_capa_debug latch) when a
 * capa is expected but missing.
 * NOTE(review): the return statements are missing from this extract.
 */
379 struct obd_capa *ll_mdscapa_get(struct inode *inode)
381 struct ll_inode_info *lli = ll_i2info(inode);
382 struct obd_capa *ocapa;
385 LASSERT(inode != NULL);
387 if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
390 cfs_spin_lock(&capa_lock);
/* capa_get() takes a reference on lli_mds_capa (may be NULL) */
391 ocapa = capa_get(lli->lli_mds_capa);
392 cfs_spin_unlock(&capa_lock);
393 if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
394 CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
395 cfs_atomic_set(&ll_capa_debug, 0);
/*
 * Install @ocapa as @inode's MDS capability.  If the inode had no MDS
 * capa, @ocapa is adopted directly and the client capa counter bumped;
 * otherwise the existing capa is updated in place (under its c_lock)
 * and, presumably, the existing object is returned while @ocapa is
 * freed -- the lines doing so are missing from this extract, confirm
 * in full source.
 * Caller holds capa_lock (see ll_add_capa()).
 */
401 static struct obd_capa *do_add_mds_capa(struct inode *inode,
402 struct obd_capa *ocapa)
404 struct ll_inode_info *lli = ll_i2info(inode);
405 struct obd_capa *old = lli->lli_mds_capa;
406 struct lustre_capa *capa = &ocapa->c_capa;
409 ocapa->u.cli.inode = inode;
410 lli->lli_mds_capa = ocapa;
411 capa_count[CAPA_SITE_CLIENT]++;
413 DEBUG_CAPA(D_SEC, capa, "add MDS");
/* update path: copy new capa contents into the existing object */
415 cfs_spin_lock(&old->c_lock);
417 cfs_spin_unlock(&old->c_lock);
419 DEBUG_CAPA(D_SEC, capa, "update MDS");
/*
 * Find an OSS capability of @inode whose operation mask contains every
 * bit in @opc.  Returns the matching capa (return lines are missing
 * from this extract) or NULL when none matches.
 * Must be called with capa_lock held, as noted below.
 */
427 static struct obd_capa *do_lookup_oss_capa(struct inode *inode, int opc)
429 struct ll_inode_info *lli = ll_i2info(inode);
430 struct obd_capa *ocapa;
432 /* inside capa_lock */
433 cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
/* require all requested opc bits, not just any overlap */
434 if ((capa_opc(&ocapa->c_capa) & opc) != opc)
437 LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
438 ll_inode2fid(inode)));
439 LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);
441 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
/*
 * (Re)position @ocapa within @inode's lli_oss_capas list so the list
 * stays sorted by ascending c_expiry: @ocapa is moved in front of the
 * first entry with a later expiry, or to the tail when none is later.
 * Keeping the list sorted lets do_lookup_oss_capa()/ll_osscapa_get()
 * find the entry expiring soonest first.
 * Caller holds capa_lock.
 */
448 static inline void inode_add_oss_capa(struct inode *inode,
449 struct obd_capa *ocapa)
451 struct ll_inode_info *lli = ll_i2info(inode);
452 struct obd_capa *tmp;
453 cfs_list_t *next = NULL;
455 /* capa is sorted in lli_oss_capas so lookup can always find the
457 cfs_list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
458 if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
459 next = &tmp->u.cli.lli_list;
/* must not move a node relative to itself */
463 LASSERT(&ocapa->u.cli.lli_list != next);
464 cfs_list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
/*
 * Install @ocapa as an OSS capability of @inode.  Looks up an existing
 * capa covering the same OSS operation bits: if none, @ocapa is adopted
 * and the client capa counter bumped; otherwise the existing object is
 * updated under its c_lock (the copy/return lines are missing from this
 * extract -- presumably the old object is kept and @ocapa freed,
 * confirm in full source).  Finally the capa is (re)sorted into the
 * inode's lli_oss_capas list.
 * Caller holds capa_lock (see ll_add_capa()).
 */
467 static struct obd_capa *do_add_oss_capa(struct inode *inode,
468 struct obd_capa *ocapa)
470 struct obd_capa *old;
471 struct lustre_capa *capa = &ocapa->c_capa;
/* OSS capas only make sense for regular files */
473 LASSERTF(S_ISREG(inode->i_mode),
474 "inode has oss capa, but not regular file, mode: %d\n",
477 /* FIXME: can't replace it so easily with fine-grained opc */
478 old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
480 ocapa->u.cli.inode = inode;
481 CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
482 capa_count[CAPA_SITE_CLIENT]++;
484 DEBUG_CAPA(D_SEC, capa, "add OSS");
486 cfs_spin_lock(&old->c_lock);
488 cfs_spin_unlock(&old->c_lock);
490 DEBUG_CAPA(D_SEC, capa, "update OSS");
496 inode_add_oss_capa(inode, ocapa);
/*
 * Entry point for adding a freshly received capability to @inode.
 * Dispatches to do_add_mds_capa() or do_add_oss_capa() depending on the
 * capa type, then — except for one-shot truncate capas — stamps the
 * local expiry, queues the capa on the sorted renewal list and re-arms
 * the renewal timer.  Re-enables the ll_capa_debug latch so future
 * lookup misses are reported again.
 * NOTE(review): the return statement is missing from this extract;
 * presumably the (possibly replaced) capa is returned.
 */
500 struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
502 cfs_spin_lock(&capa_lock);
503 ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
504 do_add_oss_capa(inode, ocapa);
506 /* truncate capa won't renew */
507 if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
508 set_capa_expiry(ocapa);
509 cfs_list_del_init(&ocapa->c_list);
510 sort_add_capa(ocapa, ll_capa_list);
512 update_capa_timer(ocapa, capa_renewal_time(ocapa));
515 cfs_spin_unlock(&capa_lock);
517 cfs_atomic_set(&ll_capa_debug, 1);
521 static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
523 /* NB: set a fake expiry for this capa to prevent it renew too soon */
524 oc->c_expiry = cfs_time_add(oc->c_expiry, cfs_time_seconds(delay));
/*
 * Completion handler for a capa renewal: fold the renewed @capa (or the
 * renewal failure) back into @ocapa.
 *
 * Failure path: -ENOENT-style removal bumps ll_capa_renewal_noent;
 * other errors bump ll_capa_renewal_failed; an -EIO on a not-yet-expired
 * capa is retried in 2 minutes by faking a later expiry
 * (delay_capa_renew()) and bumping ll_capa_renewal_retries.  A capa that
 * will not be renewed again is parked on ll_idle_capas.
 *
 * Success path: the new capa body is copied in under ocapa->c_lock
 * (asserting the identifying fields before lc_opc are unchanged), the
 * expiry is refreshed, OSS capas are re-sorted into the inode list, and
 * the capa is re-queued on the renewal list with the timer re-armed.
 *
 * NOTE(review): the "rc" extraction, several braces/returns and the
 * iput() matching capa_thread_main()'s igrab() are missing from this
 * extract; code left untouched.
 */
527 int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
529 struct inode *inode = ocapa->u.cli.inode;
538 cfs_spin_lock(&capa_lock);
540 DEBUG_CAPA(D_SEC, &ocapa->c_capa,
541 "renewal canceled because object removed");
542 ll_capa_renewal_noent++;
544 ll_capa_renewal_failed++;
546 /* failed capa won't be renewed any longer, but if -EIO,
547 * client might be doing recovery, retry in 2 min. */
548 if (rc == -EIO && !capa_is_expired(ocapa)) {
549 delay_capa_renew(ocapa, 120);
550 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
551 "renewal failed: -EIO, "
553 ll_capa_renewal_retries++;
556 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
557 "renewal failed(rc: %d) for", rc);
/* give up on this capa: park it on the idle list until it expires */
561 cfs_list_del_init(&ocapa->c_list);
562 sort_add_capa(ocapa, &ll_idle_capas);
563 cfs_spin_unlock(&capa_lock);
/* success: splice the renewed capa body into the cached object */
570 cfs_spin_lock(&ocapa->c_lock);
571 LASSERT(!memcmp(&ocapa->c_capa, capa,
572 offsetof(struct lustre_capa, lc_opc)));
573 ocapa->c_capa = *capa;
574 set_capa_expiry(ocapa);
575 cfs_spin_unlock(&ocapa->c_lock);
577 cfs_spin_lock(&capa_lock);
578 if (capa_for_oss(capa))
/* expiry changed: keep lli_oss_capas sorted */
579 inode_add_oss_capa(inode, ocapa);
580 DEBUG_CAPA(D_SEC, capa, "renew");
583 cfs_list_del_init(&ocapa->c_list);
584 sort_add_capa(ocapa, ll_capa_list);
585 update_capa_timer(ocapa, capa_renewal_time(ocapa));
586 cfs_spin_unlock(&capa_lock);
/*
 * Account one more open file handle on @inode for capa purposes.
 * No-op unless the mount uses MDS or OSS capas and the inode is a
 * regular file.  The counter feeds obd_capa_open_count(), which the
 * renewal thread uses to decide whether a capa is still worth renewing.
 * NOTE(review): the continuation of the flags test and the early
 * returns are missing from this extract; code left untouched.
 */
593 void ll_capa_open(struct inode *inode)
595 struct ll_inode_info *lli = ll_i2info(inode);
597 if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
601 if (!S_ISREG(inode->i_mode))
604 cfs_atomic_inc(&lli->lli_open_count);
/*
 * Counterpart of ll_capa_open(): drop one open-handle count on @inode.
 * Same guards as ll_capa_open() — only capa-enabled mounts and regular
 * files are counted.
 * NOTE(review): the continuation of the flags test and the early
 * returns are missing from this extract; code left untouched.
 */
607 void ll_capa_close(struct inode *inode)
609 struct ll_inode_info *lli = ll_i2info(inode);
611 if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
615 if (!S_ISREG(inode->i_mode))
618 cfs_atomic_dec(&lli->lli_open_count);
621 /* delete CAPA_OPC_OSS_TRUNC only */
622 void ll_truncate_free_capa(struct obd_capa *ocapa)
627 LASSERT(ocapa->c_capa.lc_opc & CAPA_OPC_OSS_TRUNC);
628 DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free truncate");
630 /* release ref when find */
632 if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
633 cfs_spin_lock(&capa_lock);
634 ll_delete_capa(ocapa);
635 cfs_spin_unlock(&capa_lock);
/*
 * Drop every capability cached on @inode — the MDS capa (if any) and
 * all OSS capas — when the inode is being cleared (called from
 * ll_clear_inode(), per the file-header comment).  All deletions happen
 * in one capa_lock critical section.
 */
639 void ll_clear_inode_capas(struct inode *inode)
641 struct ll_inode_info *lli = ll_i2info(inode);
642 struct obd_capa *ocapa, *tmp;
644 cfs_spin_lock(&capa_lock);
645 ocapa = lli->lli_mds_capa;
647 ll_delete_capa(ocapa);
/* _safe variant: ll_delete_capa() unlinks entries while we iterate */
649 cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
651 ll_delete_capa(ocapa);
652 cfs_spin_unlock(&capa_lock);
/*
 * Dump the file-scope capa renewal counters to the console, but only
 * for mounts that actually use MDS or OSS capabilities.  Counters are
 * read without capa_lock; values are a best-effort snapshot.
 */
655 void ll_print_capa_stat(struct ll_sb_info *sbi)
657 if (sbi->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
658 LCONSOLE_INFO("Fid capabilities renewed: %llu\n"
659 "Fid capabilities renewal ENOENT: %llu\n"
660 "Fid capabilities failed to renew: %llu\n"
661 "Fid capabilities renewal retries: %llu\n",
662 ll_capa_renewed, ll_capa_renewal_noent,
663 ll_capa_renewal_failed, ll_capa_renewal_retries);