/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/llite_capa.c
 *
 * Author: Lai Siyao <lsy@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/fs.h>
#include <linux/version.h>
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/kmod.h>

#include <lustre_lite.h>
#include "llite_internal.h"

/* for obd_capa.c_list, a client capa might stay in three places:
 * 1. ll_capa_list.
 * 2. ll_idle_capas.
 * 3. stand alone: just allocated.
 */

/* capas for oss writeback and those failed to renew */
static LIST_HEAD(ll_idle_capas);
static struct ptlrpc_thread ll_capa_thread;
static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];

/* llite capa renewal timer */
struct timer_list ll_capa_timer;
/* for debug: indicate whether capa on llite is enabled or not */
static atomic_t ll_capa_debug = ATOMIC_INIT(0);
static unsigned long long ll_capa_renewed = 0;
static unsigned long long ll_capa_renewal_noent = 0;
static unsigned long long ll_capa_renewal_failed = 0;
static unsigned long long ll_capa_renewal_retries = 0;
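
/* Re-arm the global renewal timer so that it fires at @expiry if that is
 * earlier than its current deadline, or if the timer is not pending at all. */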
static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
{
        if (time_before(expiry, ll_capa_timer.expires) ||
            !timer_pending(&ll_capa_timer)) {
                mod_timer(&ll_capa_timer, expiry);
                DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                           "ll_capa_timer update: %lu/%lu by", expiry, jiffies);
        }
}
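
/* Renewal is attempted when half of the capability timeout is left, i.e. at
 * c_expiry - lc_timeout / 2. */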
static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
{
        return cfs_time_sub(ocapa->c_expiry,
                            cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
}

static inline int capa_is_to_expire(struct obd_capa *ocapa)
{
        return cfs_time_beforeq(capa_renewal_time(ocapa), cfs_time_current());
}

static inline int have_expired_capa(void)
{
        struct obd_capa *ocapa = NULL;
        int expired = 0;

        /* if ll_capa_list has client capa to expire or ll_idle_capas has
         * expired capa, return 1.
         */
        spin_lock(&capa_lock);
        if (!list_empty(ll_capa_list)) {
                ocapa = list_entry(ll_capa_list->next, struct obd_capa, c_list);
                expired = capa_is_to_expire(ocapa);
                if (!expired)
                        update_capa_timer(ocapa, capa_renewal_time(ocapa));
        } else if (!list_empty(&ll_idle_capas)) {
                ocapa = list_entry(ll_idle_capas.next, struct obd_capa, c_list);
                expired = capa_is_expired(ocapa);
                if (!expired)
                        update_capa_timer(ocapa, ocapa->c_expiry);
        }
        spin_unlock(&capa_lock);

        if (expired)
                DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
        return expired;
}

static inline int ll_capa_check_stop(void)
{
        return (ll_capa_thread.t_flags & SVC_STOPPING) ? 1 : 0;
}
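
/* Insert @ocapa into @head, which is kept sorted by expiry time (earliest
 * first); the scan starts from the tail since a freshly renewed capa usually
 * has the latest expiry. */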
static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
{
        struct obd_capa *tmp;
        struct list_head *before = NULL;

        /* TODO: client capa is sorted by expiry, this could be optimized */
        list_for_each_entry_reverse(tmp, head, c_list) {
                if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
                        before = &tmp->c_list;
                        break;
                }
        }

        LASSERT(&ocapa->c_list != before);
        list_add(&ocapa->c_list, before ?: head);
}

static inline int obd_capa_open_count(struct obd_capa *oc)
{
        struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
        return atomic_read(&lli->lli_open_count);
}
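
/* Detach a client capa from its inode and from the renewal lists, and drop
 * the reference taken at allocation time.  Callers hold capa_lock. */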
static void ll_delete_capa(struct obd_capa *ocapa)
{
        struct ll_inode_info *lli = ll_i2info(ocapa->u.cli.inode);

        if (capa_for_mds(&ocapa->c_capa)) {
                LASSERT(lli->lli_mds_capa == ocapa);
                lli->lli_mds_capa = NULL;
        } else if (capa_for_oss(&ocapa->c_capa)) {
                list_del_init(&ocapa->u.cli.lli_list);
        }

        DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
        list_del_init(&ocapa->c_list);
        capa_count[CAPA_SITE_CLIENT]--;
        /* release the reference taken when the capa was allocated */
        capa_put(ocapa);
}

/* three places where a client capa is deleted:
 * 1. capa_thread_main(), the main place to delete expired capas.
 * 2. ll_clear_inode_capas() in ll_clear_inode().
 * 3. ll_truncate_free_capa(), which deletes the truncate capa explicitly in
 *    ll_truncate().
 */
static int capa_thread_main(void *unused)
{
        struct obd_capa *ocapa, *tmp, *next;
        struct inode *inode = NULL;
        struct l_wait_info lwi = { 0 };
        int rc;
        ENTRY;

        cfs_daemonize("ll_capa");

        ll_capa_thread.t_flags = SVC_RUNNING;
        wake_up(&ll_capa_thread.t_ctl_waitq);

        while (1) {
                l_wait_event(ll_capa_thread.t_ctl_waitq,
                             (ll_capa_check_stop() || have_expired_capa()),
                             &lwi);
                if (ll_capa_check_stop())
                        break;

                next = NULL;
                spin_lock(&capa_lock);
                list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
                        LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);

                        if (!capa_is_to_expire(ocapa)) {
                                next = ocapa;
                                break;
                        }

                        list_del_init(&ocapa->c_list);

                        /* for MDS capability, only renew those which belong to
                         * a dir, or whose inode is opened, or for which the
                         * client holds a LOOKUP lock. */
                        if (capa_for_mds(&ocapa->c_capa) &&
                            !S_ISDIR(ocapa->u.cli.inode->i_mode) &&
                            obd_capa_open_count(ocapa) == 0 &&
                            !ll_have_md_lock(ocapa->u.cli.inode,
                                             MDS_INODELOCK_LOOKUP)) {
                                DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                           "skip renewal for");
                                sort_add_capa(ocapa, &ll_idle_capas);
                                continue;
                        }

                        /* for OSS capability, only renew those whose inode is
                         * opened. */
                        if (capa_for_oss(&ocapa->c_capa) &&
                            obd_capa_open_count(ocapa) == 0) {
                                /* oss capa with open count == 0 won't renew,
                                 * move to idle list */
                                sort_add_capa(ocapa, &ll_idle_capas);
                                continue;
                        }

                        /* NB iput() is in ll_update_capa() */
                        inode = igrab(ocapa->u.cli.inode);
                        if (inode == NULL) {
                                DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                           "igrab failed for");
                                continue;
                        }

                        capa_get(ocapa);
                        ll_capa_renewed++;
                        spin_unlock(&capa_lock);
                        rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
                                           ll_update_capa);
                        spin_lock(&capa_lock);
                        if (rc) {
                                DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                           "renew failed: %d", rc);
                                ll_capa_renewal_failed++;
                        }
                }

                if (next)
                        update_capa_timer(next, capa_renewal_time(next));

                list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas, c_list) {
                        if (!capa_is_expired(ocapa)) {
                                if (!next)
                                        update_capa_timer(ocapa,
                                                          ocapa->c_expiry);
                                break;
                        }

                        if (atomic_read(&ocapa->c_refc) > 1) {
                                DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                           "expired(c_refc %d), don't release",
                                           atomic_read(&ocapa->c_refc));
                                /* don't try to renew any more */
                                list_del_init(&ocapa->c_list);
                                continue;
                        }

                        /* expired capa is released. */
                        DEBUG_CAPA(D_SEC, &ocapa->c_capa, "release expired");
                        ll_delete_capa(ocapa);
                }

                spin_unlock(&capa_lock);
        }

        ll_capa_thread.t_flags = SVC_STOPPED;
        wake_up(&ll_capa_thread.t_ctl_waitq);
        RETURN(0);
}
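
/* The timer callback only wakes the capa thread; all real work is done in
 * capa_thread_main(). */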
void ll_capa_timer_callback(unsigned long unused)
{
        wake_up(&ll_capa_thread.t_ctl_waitq);
}
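
/* Start the capa renewal thread and wait until it is running. */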
int ll_capa_thread_start(void)
{
        int rc;
        ENTRY;

        init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
        rc = kernel_thread(capa_thread_main, NULL, 0);
        if (rc < 0) {
                CERROR("cannot start expired capa thread: rc %d\n", rc);
                RETURN(rc);
        }
        wait_event(ll_capa_thread.t_ctl_waitq,
                   ll_capa_thread.t_flags & SVC_RUNNING);
        RETURN(0);
}

void ll_capa_thread_stop(void)
{
        ll_capa_thread.t_flags = SVC_STOPPING;
        wake_up(&ll_capa_thread.t_ctl_waitq);
        wait_event(ll_capa_thread.t_ctl_waitq,
                   ll_capa_thread.t_flags & SVC_STOPPED);
}
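
/* Find an unexpired OSS capability of @inode that covers @opc and return it
 * with an extra reference held, or NULL if OSS capas are disabled or nothing
 * matches.  The caller drops the reference with capa_put(). */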
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *ocapa;
        int found = 0;
        ENTRY;

        if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
                RETURN(NULL);

        LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
                opc == CAPA_OPC_OSS_TRUNC);

        spin_lock(&capa_lock);
        list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                if (capa_is_expired(ocapa))
                        continue;
                if ((opc & CAPA_OPC_OSS_WRITE) &&
                    capa_opc_supported(&ocapa->c_capa, CAPA_OPC_OSS_WRITE)) {
                        found = 1;
                        break;
                } else if ((opc & CAPA_OPC_OSS_READ) &&
                           capa_opc_supported(&ocapa->c_capa,
                                              CAPA_OPC_OSS_READ)) {
                        found = 1;
                        break;
                } else if ((opc & CAPA_OPC_OSS_TRUNC) &&
                           capa_opc_supported(&ocapa->c_capa, opc)) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
                                  ll_inode2fid(inode)));
                LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);
                capa_get(ocapa);
                DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
        } else {
                ocapa = NULL;
                if (atomic_read(&ll_capa_debug)) {
                        CERROR("no capability for "DFID" opc "LPX64"\n",
                               PFID(&lli->lli_fid), opc);
                        atomic_set(&ll_capa_debug, 0);
                }
        }
        spin_unlock(&capa_lock);

        RETURN(ocapa);
}
EXPORT_SYMBOL(ll_osscapa_get);
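
/* Return the MDS capability of @inode with a reference held, or NULL if MDS
 * capas are disabled or the inode does not have one cached. */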
struct obd_capa *ll_mdscapa_get(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *ocapa;
        ENTRY;

        LASSERT(inode != NULL);

        if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
                RETURN(NULL);

        spin_lock(&capa_lock);
        ocapa = capa_get(lli->lli_mds_capa);
        spin_unlock(&capa_lock);
        if (!ocapa && atomic_read(&ll_capa_debug)) {
                CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
                atomic_set(&ll_capa_debug, 0);
        }
        RETURN(ocapa);
}
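
/* Install @ocapa as the inode's MDS capability; if one is already cached,
 * copy the new contents into it and keep using the old obd_capa.  Called
 * under capa_lock from ll_add_capa(). */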
static struct obd_capa *do_add_mds_capa(struct inode *inode,
                                        struct obd_capa *ocapa)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *old = lli->lli_mds_capa;
        struct lustre_capa *capa = &ocapa->c_capa;

        if (!old) {
                ocapa->u.cli.inode = inode;
                lli->lli_mds_capa = ocapa;
                capa_count[CAPA_SITE_CLIENT]++;
                DEBUG_CAPA(D_SEC, capa, "add MDS");
        } else {
                spin_lock(&old->c_lock);
                old->c_capa = *capa;
                spin_unlock(&old->c_lock);
                DEBUG_CAPA(D_SEC, capa, "update MDS");
                capa_put(ocapa);
                ocapa = old;
        }
        return ocapa;
}

static struct obd_capa *do_lookup_oss_capa(struct inode *inode, int opc)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *ocapa;

        /* inside capa_lock */
        list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                if ((capa_opc(&ocapa->c_capa) & opc) != opc)
                        continue;

                LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
                                  ll_inode2fid(inode)));
                LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);

                DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
                return ocapa;
        }
        return NULL;
}

static inline void inode_add_oss_capa(struct inode *inode,
                                      struct obd_capa *ocapa)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *tmp;
        struct list_head *next = NULL;

        /* capa is sorted in lli_oss_capas so lookup can always find the
         * latest capa */
        list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
                if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
                        next = &tmp->u.cli.lli_list;
                        break;
                }
        }
        LASSERT(&ocapa->u.cli.lli_list != next);
        list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
}

static struct obd_capa *do_add_oss_capa(struct inode *inode,
                                        struct obd_capa *ocapa)
{
        struct obd_capa *old;
        struct lustre_capa *capa = &ocapa->c_capa;

        LASSERTF(S_ISREG(inode->i_mode),
                 "inode has oss capa, but not regular file, mode: %d\n",
                 inode->i_mode);

        /* FIXME: can't replace it so easily with fine-grained opc */
        old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
        if (!old) {
                ocapa->u.cli.inode = inode;
                INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
                capa_count[CAPA_SITE_CLIENT]++;
                DEBUG_CAPA(D_SEC, capa, "add OSS");
        } else {
                spin_lock(&old->c_lock);
                old->c_capa = *capa;
                spin_unlock(&old->c_lock);
                DEBUG_CAPA(D_SEC, capa, "update OSS");
                capa_put(ocapa);
                ocapa = old;
        }

        inode_add_oss_capa(inode, ocapa);
        return ocapa;
}
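
/* Attach a capability received from the MDT to @inode and, unless it is a
 * truncate capa, queue it for automatic renewal. */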
struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
{
        spin_lock(&capa_lock);
        ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
                                               do_add_oss_capa(inode, ocapa);

        /* truncate capa won't renew */
        if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
                set_capa_expiry(ocapa);
                list_del_init(&ocapa->c_list);
                sort_add_capa(ocapa, ll_capa_list);

                update_capa_timer(ocapa, capa_renewal_time(ocapa));
        }

        spin_unlock(&capa_lock);

        atomic_set(&ll_capa_debug, 1);
        return ocapa;
}

static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
{
        /* NB: set a fake expiry for this capa so that it is not renewed
         * again too soon */
        oc->c_expiry = cfs_time_add(oc->c_expiry, cfs_time_seconds(delay));
}
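
/* Renewal callback passed to md_renew_capa() by the capa thread.  It drops
 * the references taken there: capa_put() on @ocapa and iput() on its inode
 * (see the "NB iput()" note in capa_thread_main()). */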
int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
{
        struct inode *inode = ocapa->u.cli.inode;
        int rc = 0;
        ENTRY;

        LASSERT(ocapa);

        if (IS_ERR(capa)) {
                /* set error code */
                rc = PTR_ERR(capa);
                spin_lock(&capa_lock);
                if (rc == -ENOENT) {
                        DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                   "renewal canceled because object removed");
                        ll_capa_renewal_noent++;
                } else {
                        ll_capa_renewal_failed++;

                        /* failed capa won't be renewed any longer, but if -EIO,
                         * client might be doing recovery, retry in 2 min. */
                        if (rc == -EIO && !capa_is_expired(ocapa)) {
                                delay_capa_renew(ocapa, 120);
                                DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                           "renewal failed: -EIO, "
                                           "retry in 2 mins");
                                ll_capa_renewal_retries++;
                                GOTO(retry, rc);
                        } else {
                                DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                           "renewal failed(rc: %d) for", rc);
                        }
                }

                list_del_init(&ocapa->c_list);
                sort_add_capa(ocapa, &ll_idle_capas);
                spin_unlock(&capa_lock);

                capa_put(ocapa);
                iput(inode);
                RETURN(rc);
        }

        spin_lock(&ocapa->c_lock);
        LASSERT(!memcmp(&ocapa->c_capa, capa,
                        offsetof(struct lustre_capa, lc_opc)));
        ocapa->c_capa = *capa;
        set_capa_expiry(ocapa);
        spin_unlock(&ocapa->c_lock);

        spin_lock(&capa_lock);
        if (capa_for_oss(capa))
                inode_add_oss_capa(inode, ocapa);
        DEBUG_CAPA(D_SEC, capa, "renew");
        EXIT;
retry:
        list_del_init(&ocapa->c_list);
        sort_add_capa(ocapa, ll_capa_list);
        update_capa_timer(ocapa, capa_renewal_time(ocapa));
        spin_unlock(&capa_lock);

        capa_put(ocapa);
        iput(inode);
        return rc;
}
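
/* Count opens of regular files so the capa thread knows which capabilities
 * are still worth renewing. */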
void ll_capa_open(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);

        if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
            == 0)
                return;
        if (!S_ISREG(inode->i_mode))
                return;
        atomic_inc(&lli->lli_open_count);
}

void ll_capa_close(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);

        if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
            == 0)
                return;
        if (!S_ISREG(inode->i_mode))
                return;
        atomic_dec(&lli->lli_open_count);
}

/* delete CAPA_OPC_OSS_TRUNC only */
void ll_truncate_free_capa(struct obd_capa *ocapa)
{
        if (!ocapa)
                return;

        LASSERT(ocapa->c_capa.lc_opc & CAPA_OPC_OSS_TRUNC);
        DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free truncate");

        /* release the reference taken when the capa was looked up */
        capa_put(ocapa);
        if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
                spin_lock(&capa_lock);
                ll_delete_capa(ocapa);
                spin_unlock(&capa_lock);
        }
}
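
/* Called when the inode is being cleared: drop the MDS capa and every OSS
 * capa still attached to it. */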
void ll_clear_inode_capas(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct obd_capa *ocapa, *tmp;

        spin_lock(&capa_lock);
        ocapa = lli->lli_mds_capa;
        if (ocapa)
                ll_delete_capa(ocapa);

        list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
                                 u.cli.lli_list)
                ll_delete_capa(ocapa);
        spin_unlock(&capa_lock);
}
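
/* Log the renewal counters for superblocks that had fid capabilities
 * enabled. */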
void ll_print_capa_stat(struct ll_sb_info *sbi)
{
        if (sbi->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
                LCONSOLE_INFO("Fid capabilities renewed: %llu\n"
                              "Fid capabilities renewal ENOENT: %llu\n"
                              "Fid capabilities failed to renew: %llu\n"
                              "Fid capabilities renewal retries: %llu\n",
                              ll_capa_renewed, ll_capa_renewal_noent,
                              ll_capa_renewal_failed, ll_capa_renewal_retries);
}