1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Light Super operations
6 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #define DEBUG_SUBSYSTEM S_LLITE
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/random.h>
29 #include <linux/version.h>
31 #include <linux/lustre_lite.h>
32 #include <linux/lustre_ha.h>
33 #include <linux/lustre_dlm.h>
34 #include <linux/lprocfs_status.h>
35 #include "llite_internal.h"
37 kmem_cache_t *ll_file_data_slab;
39 LIST_HEAD(ll_super_blocks);
40 spinlock_t ll_sb_lock = SPIN_LOCK_UNLOCKED;
42 extern struct address_space_operations ll_aops;
43 extern struct address_space_operations ll_dir_aops;
46 #define log2(n) ffz(~(n))
/* Allocate and initialise the per-superblock ll_sb_info, attach it to @sb,
 * stamp it with a freshly generated client UUID, and publish it on the
 * global ll_super_blocks list.
 * NOTE(review): this extract is missing interior lines (the post-OBD_ALLOC
 * NULL check, the `class_uuid_t uuid` declaration, and the RETURN) —
 * confirm against the complete source. */
49 struct ll_sb_info *lustre_init_sbi(struct super_block *sb)
51 struct ll_sb_info *sbi = NULL;
55 OBD_ALLOC(sbi, sizeof(*sbi));
59 spin_lock_init(&sbi->ll_lock);
60 INIT_LIST_HEAD(&sbi->ll_pglist);
61 sbi->ll_pglist_gen = 0;
/* Size the async (dirty) page cache from physical RAM: half of memory on
 * machines under 512MB, three quarters otherwise. */
62 if (num_physpages >> (20 - PAGE_SHIFT) < 512)
63 sbi->ll_async_page_max = num_physpages / 2;
65 sbi->ll_async_page_max = (num_physpages / 4) * 3;
/* Readahead budget: 1/8 of RAM, capped at the compile-time default. */
66 sbi->ll_ra_info.ra_max_pages = min(num_physpages / 8,
67 SBI_DEFAULT_READAHEAD_MAX);
69 INIT_LIST_HEAD(&sbi->ll_conn_chain);
70 INIT_HLIST_HEAD(&sbi->ll_orphan_dentry_list);
71 ll_s2sbi_nocast(sb) = sbi;
/* Each client mount is identified to servers by a random UUID. */
73 generate_random_uuid(uuid);
74 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
/* Make the new sbi visible to ll_shrink_cache() and friends. */
76 spin_lock(&ll_sb_lock);
77 list_add_tail(&sbi->ll_list, &ll_super_blocks);
78 spin_unlock(&ll_sb_lock);
/* Undo lustre_init_sbi(): unlink the sbi from the global list, free it,
 * and clear the superblock's back-pointer.
 * NOTE(review): interior lines (NULL guard, ENTRY/EXIT) appear to be
 * missing from this extract — the sequencing of the OBD_FREE before the
 * pointer clear should be confirmed against the full source. */
82 void lustre_free_sbi(struct super_block *sb)
84 struct ll_sb_info *sbi = ll_s2sbi(sb);
88 spin_lock(&ll_sb_lock);
89 list_del(&sbi->ll_list);
90 spin_unlock(&ll_sb_lock);
91 OBD_FREE(sbi, sizeof(*sbi));
93 ll_s2sbi_nocast(sb) = NULL;
/* Core mount path shared by ll_fill_super() and lustre_fill_super():
 * connect the client to the named MDC and OSC devices, fetch the root
 * fid/attributes from the MDS, instantiate the root inode, and finish
 * populating @sb (block size, magic, s_op, s_root).
 * NOTE(review): many error-path lines (if (err) guards, GOTO labels,
 * RETURNs) are missing from this extract; the trailing obd_disconnect /
 * lprocfs_unregister_mountpoint lines are the unwind labels of the
 * function, not straight-line code — confirm against the full source. */
97 int lustre_common_fill_super(struct super_block *sb, char *mdc, char *osc)
99 struct inode *root = 0;
100 struct ll_sb_info *sbi = ll_s2sbi(sb);
101 struct obd_device *obd;
102 struct ll_fid rootfid;
103 struct obd_statfs osfs;
104 struct ptlrpc_request *request = NULL;
105 struct lustre_handle osc_conn = {0, };
106 struct lustre_handle mdc_conn = {0, };
/* --- Stage 1: connect to the metadata client (MDC) device. --- */
111 obd = class_name2obd(mdc);
113 CERROR("MDC %s: not setup or attached\n", mdc);
117 if (proc_lustre_fs_root) {
118 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
121 CERROR("could not register mount in /proc/lustre");
124 err = obd_connect(&mdc_conn, obd, &sbi->ll_sb_uuid);
126 CERROR("An MDS (mdc %s) is performing recovery, of which this"
127 " client is not a part. Please wait for recovery to "
128 "complete, abort, or time out.\n", mdc);
131 CERROR("cannot connect to %s: rc = %d\n", mdc, err);
134 sbi->ll_mdc_exp = class_conn2export(&mdc_conn);
/* Seed superblock geometry from a (possibly ~1s stale) MDS statfs. */
136 err = obd_statfs(obd, &osfs, jiffies - HZ);
140 LASSERT(osfs.os_bsize);
141 sb->s_blocksize = osfs.os_bsize;
142 sb->s_blocksize_bits = log2(osfs.os_bsize);
143 sb->s_magic = LL_SUPER_MAGIC;
144 sb->s_maxbytes = PAGE_CACHE_MAXBYTES;
145 sbi->ll_namelen = osfs.os_namelen;
/* Derive a stable device number from the MDS target UUID. */
147 devno = get_uuid2int(sbi2mdc(sbi)->cl_import->imp_target_uuid.uuid,
148 strlen(sbi2mdc(sbi)->cl_import->imp_target_uuid.uuid));
149 /* s_dev is also used in lt_compare() to compare two fs */
/* --- Stage 2: connect to the data client (OSC/LOV) device. --- */
152 obd = class_name2obd(osc);
154 CERROR("OSC %s: not setup or attached\n", osc);
158 err = obd_connect(&osc_conn, obd, &sbi->ll_sb_uuid);
160 CERROR("An OST (osc %s) is performing recovery, of which this"
161 " client is not a part. Please wait for recovery to "
162 "complete, abort, or time out.\n", osc);
165 CERROR("cannot connect to %s: rc = %d\n", osc, err);
168 sbi->ll_osc_exp = class_conn2export(&osc_conn);
170 mdc_init_ea_size(sbi->ll_mdc_exp, sbi->ll_osc_exp);
/* Lazily create the global ll_async_page slab; its object size depends
 * on how much per-page state obd_prep_async_page() reports it needs. */
172 if (!ll_async_page_slab) {
173 ll_async_page_slab_size =
174 size_round(sizeof(struct ll_async_page)) +
175 obd_prep_async_page(sbi->ll_osc_exp, NULL, NULL, NULL,
176 0, NULL, NULL, NULL);
177 ll_async_page_slab = kmem_cache_create("ll_async_page",
178 ll_async_page_slab_size,
180 if (!ll_async_page_slab)
181 GOTO(out_osc, -ENOMEM);
/* --- Stage 3: look up and instantiate the filesystem root. --- */
184 err = mdc_getstatus(sbi->ll_mdc_exp, &rootfid);
186 CERROR("cannot mds_connect: rc = %d\n", err);
189 CDEBUG(D_SUPER, "rootfid "LPU64"\n", rootfid.id);
190 sbi->ll_rootino = rootfid.id;
192 sb->s_op = &lustre_super_operations;
195 * XXX: move this to after cbd setup? */
196 err = mdc_getattr(sbi->ll_mdc_exp, &rootfid,
197 OBD_MD_FLNOTOBD|OBD_MD_FLBLOCKS, 0, &request);
199 CERROR("mdc_getattr failed for root: rc = %d\n", err);
203 err = mdc_req2lustre_md(request, 0, sbi->ll_osc_exp, &md);
205 CERROR("failed to understand root inode md: rc = %d\n",err);
206 ptlrpc_req_finished (request);
210 LASSERT(sbi->ll_rootino != 0);
211 root = ll_iget(sb, sbi->ll_rootino, &md);
213 ptlrpc_req_finished(request);
215 if (root == NULL || is_bad_inode(root)) {
216 /* XXX might need iput() for bad inode */
217 CERROR("lustre_lite: bad iget4 for root\n");
218 GOTO(out_root, err = -EBADF);
/* Start the deferred-close thread used by file release. */
221 err = ll_close_thread_start(&sbi->ll_lcq);
223 CERROR("cannot start close thread: rc %d\n", err);
227 /* making vm readahead 0 for 2.4.x. In the case of 2.6.x,
228 backing dev info assigned to inode mapping is used for
229 determining maximal readahead. */
230 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
231 !defined(KERNEL_HAS_AS_MAX_READAHEAD)
232 /* bug 2805 - set VM readahead to zero */
233 vm_max_readahead = vm_min_readahead = 0;
236 sb->s_root = d_alloc_root(root);
/* Error unwind (labels elided in this extract): disconnect OSC, then
 * MDC, then drop the /proc entry. */
243 obd_disconnect(sbi->ll_osc_exp, 0);
245 obd_disconnect(sbi->ll_mdc_exp, 0);
247 lprocfs_unregister_mountpoint(sbi);
/* Debugging aid: log an inode's device, ino, mode, refcount, and how many
 * dentries alias it (counted by walking i_dentry). */
251 void lustre_dump_inode(struct inode *inode)
253 struct list_head *tmp;
254 int dentry_count = 0;
255 BDEVNAME_DECLARE_STORAGE(buf);
257 LASSERT(inode != NULL);
/* Count the aliasing dentries; the increment line is elided here. */
259 list_for_each(tmp, &inode->i_dentry)
262 CERROR("inode %p dump: dev=%s:%lu, mode=%o, count=%u, %d dentries\n",
263 inode, ll_bdevname(inode->i_sb, buf), inode->i_ino,
264 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
/* Debugging aid: log a dentry (name, parent, inode, refcount, flags,
 * subdir count), dump its inode if present, and recurse into d_subdirs
 * while @recur > 0.
 * NOTE(review): the subdir-counting increment and the recursion guard
 * (`if (recur == 0) return;`) are elided in this extract. */
267 void lustre_dump_dentry(struct dentry *dentry, int recur)
269 struct list_head *tmp;
272 LASSERT(dentry != NULL);
274 list_for_each(tmp, &dentry->d_subdirs)
277 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
278 " flags=0x%x, vfs_flags=0x%lx, fsdata=%p, %d subdirs\n", dentry,
279 dentry->d_name.len, dentry->d_name.name,
280 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
281 dentry->d_parent, dentry->d_inode, atomic_read(&dentry->d_count),
282 dentry->d_flags, dentry->d_vfs_flags, dentry->d_fsdata, subdirs);
283 if (dentry->d_inode != NULL)
284 lustre_dump_inode(dentry->d_inode);
/* Depth-limited recursion over the children. */
289 list_for_each(tmp, &dentry->d_subdirs) {
290 struct dentry *d = list_entry(tmp, struct dentry, d_child);
291 lustre_dump_dentry(d, recur - 1);
/* Common unmount teardown: stop the close thread, disconnect from OSC
 * and MDC, remove /proc entries, and flush any orphaned dentries left
 * on the superblock's orphan list. */
295 void lustre_common_put_super(struct super_block *sb)
297 struct ll_sb_info *sbi = ll_s2sbi(sb);
298 struct hlist_node *tmp, *next;
301 ll_close_thread_shutdown(sbi->ll_lcq);
303 list_del(&sbi->ll_conn_chain);
304 obd_disconnect(sbi->ll_osc_exp, 0);
/* Drop the /proc/lustre mountpoint entry and any leftover proc root. */
306 lprocfs_unregister_mountpoint(sbi);
307 if (sbi->ll_proc_root) {
308 lprocfs_remove(sbi->ll_proc_root);
309 sbi->ll_proc_root = NULL;
312 obd_disconnect(sbi->ll_mdc_exp, 0);
314 // We do this to get rid of orphaned dentries. That is not really trw.
315 hlist_for_each_safe(tmp, next, &sbi->ll_orphan_dentry_list) {
316 struct dentry *dentry = hlist_entry(tmp, struct dentry, d_hash);
317 CWARN("found orphan dentry %.*s (%p->%p) at unmount, dumping "
318 "before and after shrink_dcache_parent\n",
319 dentry->d_name.len, dentry->d_name.name, dentry, next);
/* Dump, try to shrink the subtree, dump again to see what survived. */
320 lustre_dump_dentry(dentry, 1);
321 shrink_dcache_parent(dentry);
322 lustre_dump_dentry(dentry, 1);
/* Parse a "opt=value" mount option: if @data begins with @opt, return a
 * freshly OBD_ALLOCed copy of the text after '='; otherwise NULL.
 * Caller owns the returned string and frees it with OBD_FREE(str, strlen+1).
 * NOTE(review): the RETURN(NULL) early-exit lines and the `value++` skip
 * past '=' are elided in this extract. */
328 char *ll_read_opt(const char *opt, char *data)
334 CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
335 if (strncmp(opt, data, strlen(opt)))
337 if ((value = strchr(data, '=')) == NULL)
341 OBD_ALLOC(retval, strlen(value) + 1);
343 CERROR("out of memory!\n");
/* Copy including the trailing NUL. */
347 memcpy(retval, value, strlen(value)+1);
348 CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
/* Flag-style mount option: return @fl if @data starts with @opt, else 0
 * (the RETURN lines are elided in this extract). */
352 int ll_set_opt(const char *opt, char *data, int fl)
356 CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
357 if (strncmp(opt, data, strlen(opt)))
/* Split the comma-separated mount options string and extract the "osc="
 * and "mdc=" device names (allocated via ll_read_opt) plus boolean flags
 * such as "nolock".  Uses strtok on 2.4 kernels and strsep on 2.5+. */
363 void ll_options(char *options, char **ost, char **mdc, int *flags)
366 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
367 char *opt_ptr = options;
/* Tokenise: strtok (2.4) vs strsep (2.5+); the NULL-options early
 * return is elided in this extract. */
376 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
377 for (this_char = strtok (options, ",");
379 this_char = strtok (NULL, ",")) {
381 while ((this_char = strsep (&opt_ptr, ",")) != NULL) {
383 CDEBUG(D_SUPER, "this_char %s\n", this_char);
/* First match wins for each of osc/mdc; flags accumulate. */
384 if (!*ost && (*ost = ll_read_opt("osc", this_char)))
386 if (!*mdc && (*mdc = ll_read_opt("mdc", this_char)))
388 if (!(*flags & LL_SBI_NOLCK) &&
389 ((*flags) = (*flags) |
390 ll_set_opt("nolock", this_char,
/* Initialise the Lustre-private part of a new inode: open semaphore,
 * size limit, lock, pending-write list, and the liveness magic used by
 * ll_inode_from_lock() to detect stale l_ast_data. */
397 void ll_lli_init(struct ll_inode_info *lli)
399 sema_init(&lli->lli_open_sem, 1);
401 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
402 spin_lock_init(&lli->lli_lock);
403 INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
404 lli->lli_inode_magic = LLI_INODE_MAGIC;
/* "Plain" mount entry point (0-conf not in use): parse osc=/mdc= from the
 * option string, then hand off to lustre_common_fill_super().  The option
 * strings are freed on all paths.
 * NOTE(review): error checks between the visible lines (missing osc/mdc
 * CERRORs, the out: label) are elided in this extract. */
407 int ll_fill_super(struct super_block *sb, void *data, int silent)
409 struct ll_sb_info *sbi;
415 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
417 sbi = lustre_init_sbi(sb);
421 ll_options(data, &osc, &mdc, &sbi->ll_flags);
425 GOTO(out, err = -EINVAL);
430 GOTO(out, err = -EINVAL);
433 err = lustre_common_fill_super(sb, mdc, osc);
/* Cleanup: the option strings were OBD_ALLOCed by ll_read_opt(). */
439 OBD_FREE(mdc, strlen(mdc) + 1);
441 OBD_FREE(osc, strlen(osc) + 1);
444 } /* ll_read_super */
/* Zero-conf mount helper: build a temporary MDC ("mdc_dev"), connect to
 * the MDS named in @lmd, fetch and replay the configuration llog for
 * @profile via @cfg, then tear the temporary MDC back down.  The teardown
 * half of the function doubles as the error-unwind path (labels elided in
 * this extract).
 * NOTE(review): numerous `if (err) GOTO/RETURN` guards between the visible
 * lines are missing from this extract — confirm control flow against the
 * full source. */
446 int lustre_process_log(struct lustre_mount_data *lmd, char * profile,
447 struct config_llog_instance *cfg, int allow_recov)
449 struct lustre_cfg lcfg;
450 struct portals_cfg pcfg;
451 char * peer = "MDS_PEER_UUID";
452 struct obd_device *obd;
453 struct lustre_handle mdc_conn = {0, };
454 struct obd_export *exp;
455 char * name = "mdc_dev";
457 struct obd_uuid mdc_uuid;
458 struct llog_ctxt *ctxt;
463 if (lmd_bad_magic(lmd))
/* Temporary identity for the config-fetching MDC. */
466 generate_random_uuid(uuid);
467 class_uuid_unparse(uuid, &mdc_uuid);
/* --- Networking setup: register our NID and add the MDS peer. --- */
469 if (lmd->lmd_local_nid) {
470 PCFG_INIT(pcfg, NAL_CMD_REGISTER_MYNID);
471 pcfg.pcfg_nal = lmd->lmd_nal;
472 pcfg.pcfg_nid = lmd->lmd_local_nid;
473 err = libcfs_nal_cmd(&pcfg);
/* Peer registration is only needed for connection-oriented NALs. */
478 if (lmd->lmd_nal == SOCKNAL ||
479 lmd->lmd_nal == OPENIBNAL ||
480 lmd->lmd_nal == IIBNAL ||
481 lmd->lmd_nal == VIBNAL ||
482 lmd->lmd_nal == RANAL) {
483 PCFG_INIT(pcfg, NAL_CMD_ADD_PEER);
484 pcfg.pcfg_nal = lmd->lmd_nal;
485 pcfg.pcfg_nid = lmd->lmd_server_nid;
486 pcfg.pcfg_id = lmd->lmd_server_ipaddr;
487 pcfg.pcfg_misc = lmd->lmd_port;
488 err = libcfs_nal_cmd(&pcfg);
/* --- Build the temporary MDC device: add_uuid, attach, setup. --- */
493 LCFG_INIT(lcfg, LCFG_ADD_UUID, name);
494 lcfg.lcfg_nid = lmd->lmd_server_nid;
495 lcfg.lcfg_inllen1 = strlen(peer) + 1;
496 lcfg.lcfg_inlbuf1 = peer;
497 lcfg.lcfg_nal = lmd->lmd_nal;
498 err = class_process_config(&lcfg);
500 GOTO(out_del_conn, err);
502 LCFG_INIT(lcfg, LCFG_ATTACH, name);
503 lcfg.lcfg_inlbuf1 = "mdc";
504 lcfg.lcfg_inllen1 = strlen(lcfg.lcfg_inlbuf1) + 1;
505 lcfg.lcfg_inlbuf2 = mdc_uuid.uuid;
506 lcfg.lcfg_inllen2 = strlen(lcfg.lcfg_inlbuf2) + 1;
507 err = class_process_config(&lcfg);
509 GOTO(out_del_uuid, err);
511 LCFG_INIT(lcfg, LCFG_SETUP, name);
512 lcfg.lcfg_inlbuf1 = lmd->lmd_mds;
513 lcfg.lcfg_inllen1 = strlen(lcfg.lcfg_inlbuf1) + 1;
514 lcfg.lcfg_inlbuf2 = peer;
515 lcfg.lcfg_inllen2 = strlen(lcfg.lcfg_inlbuf2) + 1;
516 err = class_process_config(&lcfg);
518 GOTO(out_detach, err);
520 obd = class_name2obd(name);
522 GOTO(out_cleanup, err = -EINVAL);
524 /* Disable initial recovery on this import */
525 err = obd_set_info(obd->obd_self_export,
526 strlen("initial_recov"), "initial_recov",
527 sizeof(allow_recov), &allow_recov);
529 GOTO(out_cleanup, err);
/* --- Connect and replay the config llog for the profile. --- */
531 err = obd_connect(&mdc_conn, obd, &mdc_uuid);
533 CERROR("cannot connect to %s: rc = %d\n", lmd->lmd_mds, err);
534 GOTO(out_cleanup, err);
537 exp = class_conn2export(&mdc_conn);
539 ctxt = llog_get_context(exp->exp_obd, LLOG_CONFIG_REPL_CTXT);
541 rc = class_config_parse_llog(ctxt, profile, cfg);
544 * For debugging, it's useful to just dump the log
546 rc = class_config_dump_llog(ctxt, profile, cfg);
549 CERROR("class_config_parse_llog failed: rc = %d\n", rc);
/* --- Teardown / unwind: disconnect, cleanup, detach, del_uuid. --- */
552 err = obd_disconnect(exp, 0);
555 LCFG_INIT(lcfg, LCFG_CLEANUP, name);
556 err = class_process_config(&lcfg);
561 LCFG_INIT(lcfg, LCFG_DETACH, name);
562 err = class_process_config(&lcfg);
567 LCFG_INIT(lcfg, LCFG_DEL_UUID, name);
568 lcfg.lcfg_inllen1 = strlen(peer) + 1;
569 lcfg.lcfg_inlbuf1 = peer;
570 err = class_process_config(&lcfg);
/* Remove the peer added above, again only for connection-based NALs. */
573 if (lmd->lmd_nal == SOCKNAL ||
574 lmd->lmd_nal == OPENIBNAL ||
575 lmd->lmd_nal == IIBNAL ||
576 lmd->lmd_nal == VIBNAL ||
577 lmd->lmd_nal == RANAL) {
578 PCFG_INIT(pcfg, NAL_CMD_DEL_PEER);
579 pcfg.pcfg_nal = lmd->lmd_nal;
580 pcfg.pcfg_nid = lmd->lmd_server_nid;
581 pcfg.pcfg_flags = 1; /* single_share */
582 err = libcfs_nal_cmd(&pcfg);
/* Last-ditch cleanup when config-log-driven teardown fails or is skipped
 * (e.g. forced umount): iterate every obd device belonging to this
 * mount's UUID group and issue LCFG_CLEANUP + LCFG_DETACH on each, then
 * drop the profile registered for this mount. */
593 static void lustre_manual_cleanup(struct ll_sb_info *sbi)
595 struct lustre_cfg lcfg;
596 struct obd_device *obd;
599 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) != NULL)
603 LCFG_INIT(lcfg, LCFG_CLEANUP, obd->obd_name);
604 err = class_process_config(&lcfg);
606 CERROR("cleanup failed: %s\n", obd->obd_name);
610 LCFG_INIT(lcfg, LCFG_DETACH, obd->obd_name);
611 err = class_process_config(&lcfg);
613 CERROR("detach failed: %s\n", obd->obd_name);
/* The profile was registered by the config llog; remove it too. */
618 if (sbi->ll_lmd != NULL)
619 class_del_profile(sbi->ll_lmd->lmd_profile);
/* Zero-conf mount entry point: when the mount data names a profile,
 * fetch and replay its config llog (lustre_process_log) to create the
 * per-mount OSC/MDC devices, derive their instance-suffixed names, then
 * call lustre_common_fill_super().  On failure, replay the "<profile>-clean"
 * llog (falling back to lustre_manual_cleanup) and free everything.
 * NOTE(review): several guards and GOTO labels between the visible lines
 * are elided in this extract — confirm the unwind ordering against the
 * full source. */
622 int lustre_fill_super(struct super_block *sb, void *data, int silent)
624 struct lustre_mount_data * lmd = data;
625 struct ll_sb_info *sbi;
631 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
632 if (lmd_bad_magic(lmd))
635 sbi = lustre_init_sbi(sb);
639 if (lmd->lmd_profile) {
640 struct lustre_profile *lprof;
641 struct config_llog_instance cfg;
644 if (lmd->lmd_mds[0] == '\0') {
645 CERROR("no mds name\n");
646 GOTO(out_free, err = -EINVAL);
/* Keep a private copy of the mount data for later cleanup. */
649 OBD_ALLOC(sbi->ll_lmd, sizeof(*sbi->ll_lmd));
650 if (sbi->ll_lmd == NULL)
651 GOTO(out_free, err = -ENOMEM);
652 memcpy(sbi->ll_lmd, lmd, sizeof(*lmd));
654 /* generate a string unique to this super, let's try
655 the address of the super itself.*/
656 len = (sizeof(sb) * 2) + 1;
657 OBD_ALLOC(sbi->ll_instance, len);
658 if (sbi->ll_instance == NULL)
659 GOTO(out_free, err = -ENOMEM);
660 sprintf(sbi->ll_instance, "%p", sb);
662 cfg.cfg_instance = sbi->ll_instance;
663 cfg.cfg_uuid = sbi->ll_sb_uuid;
664 cfg.cfg_local_nid = lmd->lmd_local_nid;
665 err = lustre_process_log(lmd, lmd->lmd_profile, &cfg, 0);
667 CERROR("Unable to process log: %s\n", lmd->lmd_profile);
672 lprof = class_get_profile(lmd->lmd_profile);
674 CERROR("No profile found: %s\n", lmd->lmd_profile);
675 GOTO(out_free, err = -EINVAL);
/* Build "<lp_osc>-<instance>" / "<lp_mdc>-<instance>" device names,
 * replacing any names parsed earlier from the option string. */
678 OBD_FREE(osc, strlen(osc) + 1);
679 OBD_ALLOC(osc, strlen(lprof->lp_osc) +
680 strlen(sbi->ll_instance) + 2);
681 sprintf(osc, "%s-%s", lprof->lp_osc, sbi->ll_instance);
684 OBD_FREE(mdc, strlen(mdc) + 1);
685 OBD_ALLOC(mdc, strlen(lprof->lp_mdc) +
686 strlen(sbi->ll_instance) + 2);
687 sprintf(mdc, "%s-%s", lprof->lp_mdc, sbi->ll_instance);
692 GOTO(out_free, err = -EINVAL);
697 GOTO(out_free, err = -EINVAL);
700 err = lustre_common_fill_super(sb, mdc, osc);
707 OBD_FREE(mdc, strlen(mdc) + 1);
709 OBD_FREE(osc, strlen(osc) + 1);
/* Error unwind: replay the "<profile>-clean" llog to tear down the
 * devices created above; fall back to manual cleanup if that fails. */
715 int len = strlen(sbi->ll_lmd->lmd_profile) + sizeof("-clean")+1;
718 if (sbi->ll_instance != NULL) {
720 struct config_llog_instance cfg;
722 cfg.cfg_instance = sbi->ll_instance;
723 cfg.cfg_uuid = sbi->ll_sb_uuid;
725 OBD_ALLOC(cln_prof, len);
726 sprintf(cln_prof, "%s-clean", sbi->ll_lmd->lmd_profile);
728 err = lustre_process_log(sbi->ll_lmd, cln_prof, &cfg,
731 CERROR("Unable to process log: %s\n", cln_prof);
732 lustre_manual_cleanup(sbi);
734 OBD_FREE(cln_prof, len);
735 OBD_FREE(sbi->ll_instance, strlen(sbi->ll_instance)+ 1);
737 OBD_FREE(sbi->ll_lmd, sizeof(*sbi->ll_lmd));
742 } /* lustre_fill_super */
/* Zero-conf umount: run the common teardown, then — if this mount was
 * configured from a profile — replay the "<profile>-clean" llog to
 * destroy the per-mount devices.  A forced umount (obd_no_recov) skips
 * the llog and goes straight to lustre_manual_cleanup(). */
744 void lustre_put_super(struct super_block *sb)
746 struct obd_device *obd;
747 struct ll_sb_info *sbi = ll_s2sbi(sb);
748 int force_umount = 0;
751 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
752 obd = class_exp2obd(sbi->ll_mdc_exp);
754 force_umount = obd->obd_no_recov;
757 lustre_common_put_super(sb);
759 if (sbi->ll_lmd != NULL) {
761 int len = strlen(sbi->ll_lmd->lmd_profile) + sizeof("-clean")+1;
763 struct config_llog_instance cfg;
766 CERROR("force umount, doing manual cleanup\n");
767 lustre_manual_cleanup(sbi);
771 cfg.cfg_instance = sbi->ll_instance;
772 cfg.cfg_uuid = sbi->ll_sb_uuid;
774 OBD_ALLOC(cln_prof, len);
775 sprintf(cln_prof, "%s-clean", sbi->ll_lmd->lmd_profile);
777 err = lustre_process_log(sbi->ll_lmd, cln_prof, &cfg, 0);
779 CERROR("Unable to process log: %s, doing manual cleanup"
781 lustre_manual_cleanup(sbi);
784 OBD_FREE(cln_prof, len);
786 OBD_FREE(sbi->ll_lmd, sizeof(*sbi->ll_lmd));
787 OBD_FREE(sbi->ll_instance, strlen(sbi->ll_instance) + 1);
793 } /* lustre_put_super */
/* VM memory-pressure hook (2.4 register_cache interface): walk every
 * mounted llite superblock and ask each to shrink its async-page cache.
 * The return type (int vs void) depends on HAVE_CACHE_RETURN_INT.
 * NOTE(review): locking of ll_super_blocks during this walk is not
 * visible in this extract — confirm against the full source. */
795 #ifdef HAVE_REGISTER_CACHE
796 #include <linux/cache_def.h>
797 #ifdef HAVE_CACHE_RETURN_INT
802 ll_shrink_cache(int priority, unsigned int gfp_mask)
804 struct ll_sb_info *sbi;
807 list_for_each_entry(sbi, &ll_super_blocks, ll_list)
808 count += llap_shrink_cache(sbi, priority);
810 #ifdef HAVE_CACHE_RETURN_INT
/* Registration record handed to the kernel's cache shrinker. */
815 struct cache_definition ll_cache_definition = {
816 .name = "llap_cache",
817 .shrink = ll_shrink_cache
819 #endif /* HAVE_REGISTER_CACHE */
/* Resolve a DLM lock's l_ast_data back to its inode, taking an igrab()
 * reference.  The lli_inode_magic check guards against l_ast_data that
 * points at a dead/reused inode; a mismatch is logged (at D_INFO if the
 * inode is already being freed, D_WARNING otherwise) and NULL-equivalent
 * handling applies.  Caller must iput() the returned inode. */
821 struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
823 struct inode *inode = NULL;
824 l_lock(&lock->l_resource->lr_namespace->ns_lock);
825 if (lock->l_ast_data) {
826 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
827 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
828 inode = igrab(lock->l_ast_data);
/* Magic mismatch: l_ast_data is stale — log it for debugging. */
830 inode = lock->l_ast_data;
831 __LDLM_DEBUG(inode->i_state & I_FREEING ?
832 D_INFO : D_WARNING, lock,
833 "l_ast_data %p is bogus: magic %08x",
834 lock->l_ast_data, lli->lli_inode_magic);
838 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* ldlm iterator callback used by ll_clear_inode(): clear l_ast_data on
 * every lock whose ast data matches the inode being torn down, warning
 * if such a lock was never granted.  Always continues iteration. */
842 static int null_if_equal(struct ldlm_lock *lock, void *data)
844 if (data == lock->l_ast_data) {
845 lock->l_ast_data = NULL;
847 if (lock->l_req_mode != lock->l_granted_mode)
848 LDLM_ERROR(lock,"clearing inode with ungranted lock");
851 return LDLM_ITER_CONTINUE;
/* VFS clear_inode: detach this inode from all MDC and OSC DLM locks
 * (via null_if_equal), free the stripe metadata and any cached symlink
 * target, and mark the lli dead so ll_inode_from_lock() rejects it. */
854 void ll_clear_inode(struct inode *inode)
857 struct ll_inode_info *lli = ll_i2info(inode);
858 struct ll_sb_info *sbi = ll_i2sbi(inode);
861 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
862 inode->i_generation, inode);
864 ll_inode2fid(&fid, inode);
865 clear_bit(LLI_F_HAVE_MDS_SIZE_LOCK, &(ll_i2info(inode)->lli_flags));
/* Sever metadata-lock back-pointers to this inode. */
866 mdc_change_cbdata(sbi->ll_mdc_exp, &fid, null_if_equal, inode);
/* And data-lock back-pointers, then release the stripe md (the
 * lli_smd NULL guard is elided in this extract). */
869 obd_change_cbdata(sbi->ll_osc_exp, lli->lli_smd,
870 null_if_equal, inode);
873 obd_free_memmd(sbi->ll_osc_exp, &lli->lli_smd);
877 if (lli->lli_symlink_name) {
878 OBD_FREE(lli->lli_symlink_name,
879 strlen(lli->lli_symlink_name) + 1);
880 lli->lli_symlink_name = NULL;
882 lli->lli_inode_magic = LLI_INODE_DEAD;
887 /* If this inode has objects allocated to it (lsm != NULL), then the OST
888 * object(s) determine the file size and mtime. Otherwise, the MDS will
889 * keep these values until such a time that objects are allocated for it.
890 * We do the MDS operations first, as it is checking permissions for us.
891 * We don't to the MDS RPC if there is nothing that we want to store there,
892 * otherwise there is no harm in updating mtime/atime on the MDS if we are
893 * going to do an RPC anyways.
895 * If we are doing a truncate, we will send the mtime and ctime updates
896 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
897 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
/* Apply an iattr to a Lustre inode, splitting the work between the MDS
 * (ownership/mode/times when no objects exist) and the OSTs (size via a
 * locked truncate, or mtime via obd_setattr).  See the block comment
 * above for the full MDS/OST split rationale.
 * NOTE(review): many `if (rc)`/RETURN lines are elided in this extract;
 * the control flow annotated below is inferred from the visible code and
 * should be confirmed against the full source. */
900 int ll_setattr_raw(struct inode *inode, struct iattr *attr)
902 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
903 struct ll_sb_info *sbi = ll_i2sbi(inode);
904 struct ptlrpc_request *request = NULL;
905 struct mdc_op_data op_data;
906 int ia_valid = attr->ia_valid;
910 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu\n", inode->i_ino);
911 lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_SETATTR);
/* Truncation implies mtime/ctime updates; reject over-maxbytes sizes. */
913 if (ia_valid & ATTR_SIZE) {
914 if (attr->ia_size > ll_file_maxbytes(inode)) {
915 CDEBUG(D_INODE, "file too large %llu > "LPU64"\n",
916 attr->ia_size, ll_file_maxbytes(inode));
920 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
923 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
924 if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
925 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
929 /* We mark all of the fields "set" so MDS/OST does not re-set them */
930 if (attr->ia_valid & ATTR_CTIME) {
931 attr->ia_ctime = CURRENT_TIME;
932 attr->ia_valid |= ATTR_CTIME_SET;
934 if (!(ia_valid & ATTR_ATIME_SET) && (attr->ia_valid & ATTR_ATIME)) {
935 attr->ia_atime = CURRENT_TIME;
936 attr->ia_valid |= ATTR_ATIME_SET;
938 if (!(ia_valid & ATTR_MTIME_SET) && (attr->ia_valid & ATTR_MTIME)) {
939 attr->ia_mtime = CURRENT_TIME;
940 attr->ia_valid |= ATTR_MTIME_SET;
943 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
944 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
945 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
/* ATTR_SIZE is handled separately via the OST punch path below. */
948 attr->ia_valid &= ~ATTR_SIZE;
950 /* If only OST attributes being set on objects, don't do MDS RPC.
951 * In that case, we need to check permissions and update the local
952 * inode ourselves so we can call obdo_from_inode() always. */
953 if (ia_valid & (lsm ? ~(ATTR_SIZE | ATTR_FROM_OPEN | ATTR_RAW) : ~0)) {
955 ll_prepare_mdc_op_data(&op_data, inode, NULL, NULL, 0, 0);
957 rc = mdc_setattr(sbi->ll_mdc_exp, &op_data,
958 attr, NULL, 0, NULL, 0, &request);
/* MDS rejected: permission errors are expected, others logged. */
961 ptlrpc_req_finished(request);
962 if (rc != -EPERM && rc != -EACCES)
963 CERROR("mdc_setattr fails: rc = %d\n", rc);
967 rc = mdc_req2lustre_md(request, 0, sbi->ll_osc_exp, &md);
969 ptlrpc_req_finished(request);
973 /* Won't invoke vmtruncate as we already cleared ATTR_SIZE,
974 * but needed to set timestamps backwards on utime. */
975 inode_setattr(inode, attr);
976 ll_update_inode(inode, md.body, md.lsm);
977 ptlrpc_req_finished(request);
979 if (!lsm || !S_ISREG(inode->i_mode)) {
980 CDEBUG(D_INODE, "no lsm: not setting attrs on OST\n");
984 /* The OST doesn't check permissions, but the alternative is
985 * a gratuitous RPC to the MDS. We already rely on the client
986 * to do read/write/truncate permission checks, so is mtime OK?
988 if (ia_valid & (ATTR_MTIME | ATTR_ATIME)) {
989 /* from sys_utime() */
990 if (!(ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET))) {
991 if (current->fsuid != inode->i_uid &&
992 (rc=ll_permission(inode,MAY_WRITE,NULL))!=0)
995 /* from inode_change_ok() */
996 if (current->fsuid != inode->i_uid &&
997 !capable(CAP_FOWNER))
1002 /* Won't invoke vmtruncate, as we already cleared ATTR_SIZE */
1003 inode_setattr(inode, attr);
1006 /* We really need to get our PW lock before we change inode->i_size.
1007 * If we don't we can race with other i_size updaters on our node, like
1008 * ll_file_read. We can also race with i_size propogation to other
1009 * nodes through dirtying and writeback of final cached pages. This
1010 * last one is especially bad for racing o_append users on other
1012 if (ia_valid & ATTR_SIZE) {
1013 ldlm_policy_data_t policy = { .l_extent = {attr->ia_size,
1015 struct lustre_handle lockh = { 0 };
1016 int err, ast_flags = 0;
1017 /* XXX when we fix the AST intents to pass the discard-range
1018 * XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
1020 if (attr->ia_size == 0)
1021 ast_flags = LDLM_AST_DISCARD_DATA;
1023 /* bug 1639: avoid write/truncate i_sem/DLM deadlock */
1024 LASSERT(atomic_read(&inode->i_sem.count) <= 0);
/* Drop i_sem/i_alloc_sem across the DLM enqueue, reacquiring in
 * the kernel-version-appropriate order afterwards. */
1026 UP_WRITE_I_ALLOC_SEM(inode);
1027 rc = ll_extent_lock(NULL, inode, lsm, LCK_PW, &policy, &lockh,
1029 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1030 DOWN_WRITE_I_ALLOC_SEM(inode);
1031 down(&inode->i_sem);
1033 down(&inode->i_sem);
1034 DOWN_WRITE_I_ALLOC_SEM(inode);
1039 rc = vmtruncate(inode, attr->ia_size);
1041 /* We need to drop the semaphore here, because this unlock may
1042 * result in a cancellation, which will need the i_sem */
1044 UP_WRITE_I_ALLOC_SEM(inode);
1045 /* unlock now as we don't mind others file lockers racing with
1046 * the mds updates below? */
1047 err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
1048 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1049 DOWN_WRITE_I_ALLOC_SEM(inode);
1050 down(&inode->i_sem);
1052 down(&inode->i_sem);
1053 DOWN_WRITE_I_ALLOC_SEM(inode);
1056 CERROR("ll_extent_unlock failed: %d\n", err);
/* No size change: push mtime (and related times) to the OSTs. */
1060 } else if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET)) {
1063 CDEBUG(D_INODE, "set mtime on OST inode %lu to %lu\n",
1064 inode->i_ino, LTIME_S(attr->ia_mtime));
1065 oa.o_id = lsm->lsm_object_id;
1066 oa.o_valid = OBD_MD_FLID;
1067 obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
1068 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
1069 rc = obd_setattr(sbi->ll_osc_exp, &oa, lsm, NULL);
1071 CERROR("obd_setattr fails: rc=%d\n", rc);
/* Dead VFS entry point kept only as a tripwire: setattr is expected to
 * arrive via ll_setattr_raw(); reaching this is a bug. */
1076 int ll_setattr(struct dentry *de, struct iattr *attr)
1078 LBUG(); /* code is unused, but leave this in case of VFS changes */
/* Combined filesystem statistics: take inode counts from the MDS statfs,
 * then overwrite the block counts with the OST (OSC/LOV) statfs.  If the
 * OSTs have fewer free objects than the MDS has free inodes, scale the
 * reported totals so "inodes in use" stays truthful.  @max_age allows
 * cached server-side statfs data up to that jiffies value. */
1082 int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1083 unsigned long max_age)
1085 struct ll_sb_info *sbi = ll_s2sbi(sb);
1086 struct obd_statfs obd_osfs;
1090 rc = obd_statfs(class_exp2obd(sbi->ll_mdc_exp), osfs, max_age);
1092 CERROR("mdc_statfs fails: rc = %d\n", rc);
1096 osfs->os_type = sb->s_magic;
1098 CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1099 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1101 rc = obd_statfs(class_exp2obd(sbi->ll_osc_exp), &obd_osfs, max_age);
1103 CERROR("obd_statfs fails: rc = %d\n", rc);
1107 CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1108 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
/* Data capacity comes from the OSTs, not the MDS. */
1111 osfs->os_blocks = obd_osfs.os_blocks;
1112 osfs->os_bfree = obd_osfs.os_bfree;
1113 osfs->os_bavail = obd_osfs.os_bavail;
1115 /* If we don't have as many objects free on the OST as inodes
1116 * on the MDS, we reduce the total number of inodes to
1117 * compensate, so that the "inodes in use" number is correct.
1119 if (obd_osfs.os_ffree < osfs->os_ffree) {
1120 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1122 osfs->os_ffree = obd_osfs.os_ffree;
/* VFS statfs: fetch fresh combined stats (jiffies - 1, i.e. effectively
 * uncached), unpack into the kernel kstatfs, and — when f_blocks is only
 * 32 bits — halve the block counts (doubling the effective block size,
 * adjustment lines elided here) until they fit. */
1128 int ll_statfs(struct super_block *sb, struct kstatfs *sfs)
1130 struct obd_statfs osfs;
1133 CDEBUG(D_VFSTRACE, "VFS Op:\n");
1134 lprocfs_counter_incr(ll_s2sbi(sb)->ll_stats, LPROC_LL_STAFS);
1136 /* For now we will always get up-to-date statfs values, but in the
1137 * future we may allow some amount of caching on the client (e.g.
1138 * from QOS or lprocfs updates). */
1139 rc = ll_statfs_internal(sb, &osfs, jiffies - 1);
1143 statfs_unpack(sfs, &osfs);
/* 32-bit f_blocks overflow protection for very large filesystems. */
1145 if (sizeof(sfs->f_blocks) == 4) {
1146 while (osfs.os_blocks > ~0UL) {
1149 osfs.os_blocks >>= 1;
1150 osfs.os_bfree >>= 1;
1151 osfs.os_bavail >>= 1;
1155 sfs->f_blocks = osfs.os_blocks;
1156 sfs->f_bfree = osfs.os_bfree;
1157 sfs->f_bavail = osfs.os_bavail;
/* Merge MDS-supplied attributes (@body) and optional stripe metadata
 * (@lsm) into a VFS inode.  Each field in @body is applied only when the
 * corresponding OBD_MD_* bit is set in body->valid; mtime/ctime only
 * move forward.  A non-NULL @lsm is installed on first use, or verified
 * against (and freed in favour of) the cached copy. */
1162 void ll_update_inode(struct inode *inode, struct mds_body *body,
1163 struct lov_stripe_md *lsm)
1165 struct ll_inode_info *lli = ll_i2info(inode);
1167 LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
/* First lsm for this inode: adopt it and clamp maxbytes. */
1169 if (lli->lli_smd == NULL) {
1171 lli->lli_maxbytes = lsm->lsm_maxbytes;
1172 if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
1173 lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
/* Already have striping: it must match what the MDS just sent. */
1175 if (memcmp(lli->lli_smd, lsm, sizeof(*lsm))) {
1176 CERROR("lsm mismatch for inode %ld\n",
1178 CERROR("lli_smd:\n");
1179 dump_lsm(D_ERROR, lli->lli_smd);
1181 dump_lsm(D_ERROR, lsm);
1185 /* bug 2844 - limit i_blksize for broken user-space apps */
1186 LASSERTF(lsm->lsm_xfersize != 0, "%lu\n", lsm->lsm_xfersize);
1187 inode->i_blksize = min(lsm->lsm_xfersize, LL_MAX_BLKSIZE);
/* The duplicate lsm is not kept; free it. */
1188 if (lli->lli_smd != lsm)
1189 obd_free_memmd(ll_i2obdexp(inode), &lsm);
/* --- Conditional field-by-field attribute merge. --- */
1192 if (body->valid & OBD_MD_FLID)
1193 inode->i_ino = body->ino;
1194 if (body->valid & OBD_MD_FLATIME)
1195 LTIME_S(inode->i_atime) = body->atime;
1196 if (body->valid & OBD_MD_FLMTIME &&
1197 body->mtime > LTIME_S(inode->i_mtime)) {
1198 CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %u\n",
1199 inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
1200 LTIME_S(inode->i_mtime) = body->mtime;
1202 if (body->valid & OBD_MD_FLCTIME &&
1203 body->ctime > LTIME_S(inode->i_ctime))
1204 LTIME_S(inode->i_ctime) = body->ctime;
/* Mode and file-type bits arrive under separate valid flags. */
1205 if (body->valid & OBD_MD_FLMODE)
1206 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1207 if (body->valid & OBD_MD_FLTYPE)
1208 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1209 if (body->valid & OBD_MD_FLUID)
1210 inode->i_uid = body->uid;
1211 if (body->valid & OBD_MD_FLGID)
1212 inode->i_gid = body->gid;
1213 if (body->valid & OBD_MD_FLFLAGS)
1214 inode->i_flags = body->flags;
1215 if (body->valid & OBD_MD_FLNLINK)
1216 inode->i_nlink = body->nlink;
1217 if (body->valid & OBD_MD_FLGENER)
1218 inode->i_generation = body->generation;
1219 if (body->valid & OBD_MD_FLRDEV)
1220 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1221 inode->i_rdev = body->rdev;
1223 inode->i_rdev = old_decode_dev(body->rdev);
1225 if (body->valid & OBD_MD_FLSIZE)
1226 inode->i_size = body->size;
1227 if (body->valid & OBD_MD_FLBLOCKS)
1228 inode->i_blocks = body->blocks;
/* The MDS told us the size, so we hold the size lock. */
1230 if (body->valid & OBD_MD_FLSIZE)
1231 set_bit(LLI_F_HAVE_MDS_SIZE_LOCK, &lli->lli_flags);
/* 2.6-only backing_dev_info for special-file mappings: disables VM
 * readahead (ra_pages = 0); Lustre does its own readahead. */
1234 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
1235 static struct backing_dev_info ll_backing_dev_info = {
1236 .ra_pages = 0, /* No readahead */
1237 .memory_backed = 0, /* Does contribute to dirty memory */
/* iget callback: populate a freshly allocated inode from the lustre_md
 * carried in @opaque, then wire up the operation tables appropriate to
 * its file type (regular / directory / symlink / special). */
1241 void ll_read_inode2(struct inode *inode, void *opaque)
1243 struct lustre_md *md = opaque;
1244 struct ll_inode_info *lli = ll_i2info(inode);
1247 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1248 inode->i_generation, inode);
1252 LASSERT(!lli->lli_smd);
1254 /* Core attributes from the MDS first. This is a new inode, and
1255 * the VFS doesn't zero times in the core inode so we have to do
1256 * it ourselves. They will be overwritten by either MDS or OST
1257 * attributes - we just need to make sure they aren't newer. */
1258 LTIME_S(inode->i_mtime) = 0;
1259 LTIME_S(inode->i_atime) = 0;
1260 LTIME_S(inode->i_ctime) = 0;
1262 ll_update_inode(inode, md->body, md->lsm);
1264 /* OIDEBUG(inode); */
/* Select op tables by file type. */
1266 if (S_ISREG(inode->i_mode)) {
1267 inode->i_op = &ll_file_inode_operations;
1268 inode->i_fop = &ll_file_operations;
1269 inode->i_mapping->a_ops = &ll_aops;
1271 } else if (S_ISDIR(inode->i_mode)) {
1272 inode->i_op = &ll_dir_inode_operations;
1273 inode->i_fop = &ll_dir_operations;
1274 inode->i_mapping->a_ops = &ll_dir_aops;
1276 } else if (S_ISLNK(inode->i_mode)) {
1277 inode->i_op = &ll_fast_symlink_inode_operations;
/* Special files (char/blk/fifo/sock): kernel-version-specific init,
 * then substitute Lustre wrappers for the default special fops. */
1280 inode->i_op = &ll_special_inode_operations;
1282 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1283 init_special_inode(inode, inode->i_mode,
1284 kdev_t_to_nr(inode->i_rdev));
1286 /* initializing backing dev info. */
1287 inode->i_mapping->backing_dev_info = &ll_backing_dev_info;
1289 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1291 lli->ll_save_ifop = inode->i_fop;
1293 if (S_ISCHR(inode->i_mode))
1294 inode->i_fop = &ll_special_chr_inode_fops;
1295 else if (S_ISBLK(inode->i_mode))
1296 inode->i_fop = &ll_special_blk_inode_fops;
1297 else if (S_ISFIFO(inode->i_mode))
1298 inode->i_fop = &ll_special_fifo_inode_fops;
1299 else if (S_ISSOCK(inode->i_mode))
1300 inode->i_fop = &ll_special_sock_inode_fops;
/* ioctl handler for the ext3-compatible flag ioctls:
 * - EXT3_IOC_GETFLAGS: fetch inode flags from the MDS, translate the
 *   kernel S_* bits to EXT3_*_FL and copy to userspace;
 * - EXT3_IOC_SETFLAGS: push new flags to the MDS and (for striped files)
 *   the OSTs, then mirror them into inode->i_flags.
 * NOTE(review): obdo allocation/free and several error guards are elided
 * in this extract — confirm against the full source. */
1305 int ll_iocontrol(struct inode *inode, struct file *file,
1306 unsigned int cmd, unsigned long arg)
1308 struct ll_sb_info *sbi = ll_i2sbi(inode);
1309 struct ptlrpc_request *req = NULL;
1314 case EXT3_IOC_GETFLAGS: {
1316 unsigned long valid = OBD_MD_FLFLAGS;
1317 struct mds_body *body;
1319 ll_inode2fid(&fid, inode);
1320 rc = mdc_getattr(sbi->ll_mdc_exp, &fid, valid, 0, &req);
1322 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1326 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
/* Translate kernel inode flag bits to the ext3 ioctl encoding. */
1328 if (body->flags & S_APPEND)
1329 flags |= EXT3_APPEND_FL;
1330 if (body->flags & S_IMMUTABLE)
1331 flags |= EXT3_IMMUTABLE_FL;
1332 if (body->flags & S_NOATIME)
1333 flags |= EXT3_NOATIME_FL;
1335 ptlrpc_req_finished (req);
1337 RETURN(put_user(flags, (int *)arg));
1339 case EXT3_IOC_SETFLAGS: {
1340 struct mdc_op_data op_data;
1343 struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
1345 if (get_user(flags, (int *)arg))
/* Update the MDS copy of the flags first. */
1352 ll_prepare_mdc_op_data(&op_data, inode, NULL, NULL, 0, 0);
1354 memset(&attr, 0x0, sizeof(attr));
1355 attr.ia_attr_flags = flags;
1356 attr.ia_valid |= ATTR_ATTR_FLAG;
1358 rc = mdc_setattr(sbi->ll_mdc_exp, &op_data,
1359 &attr, NULL, 0, NULL, 0, &req);
1361 ptlrpc_req_finished(req);
1362 if (rc != -EPERM && rc != -EACCES)
1363 CERROR("mdc_setattr fails: rc = %d\n", rc);
1367 ptlrpc_req_finished(req);
/* Then the OST objects, if the file has striping. */
1369 oa->o_id = lsm->lsm_object_id;
1370 oa->o_flags = flags;
1371 oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS;
1373 rc = obd_setattr(sbi->ll_osc_exp, oa, lsm, NULL);
1376 if (rc != -EPERM && rc != -EACCES)
1377 CERROR("mdc_setattr fails: rc = %d\n", rc);
/* Finally mirror into the local inode so the VFS enforces them. */
1381 if (flags & EXT3_APPEND_FL)
1382 inode->i_flags |= S_APPEND;
1384 inode->i_flags &= ~S_APPEND;
1385 if (flags & EXT3_IMMUTABLE_FL)
1386 inode->i_flags |= S_IMMUTABLE;
1388 inode->i_flags &= ~S_IMMUTABLE;
1389 if (flags & EXT3_NOATIME_FL)
1390 inode->i_flags |= S_NOATIME;
1392 inode->i_flags &= ~S_NOATIME;
/* Forced-umount preparation: mark both the MDC and OSC obd devices
 * no-recov and fire IOC_OSC_SET_ACTIVE at each so outstanding requests
 * are invalidated rather than waited on. */
1403 void ll_umount_begin(struct super_block *sb)
1405 struct ll_sb_info *sbi = ll_s2sbi(sb);
1406 struct obd_device *obd;
1407 struct obd_ioctl_data ioc_data = { 0 };
1409 CDEBUG(D_VFSTRACE, "VFS Op:\n");
1411 obd = class_exp2obd(sbi->ll_mdc_exp);
1413 CERROR("Invalid MDC connection handle "LPX64"\n",
1414 sbi->ll_mdc_exp->exp_handle.h_cookie);
1418 obd->obd_no_recov = 1;
1419 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_mdc_exp, sizeof ioc_data,
/* Same treatment for the data side (LOV/OSC). */
1422 obd = class_exp2obd(sbi->ll_osc_exp);
1424 CERROR("Invalid LOV connection handle "LPX64"\n",
1425 sbi->ll_osc_exp->exp_handle.h_cookie);
1430 obd->obd_no_recov = 1;
1431 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_osc_exp, sizeof ioc_data,
1434 /* Really, we'd like to wait until there are no requests outstanding,
1435 * and then continue. For now, we just invalidate the requests,
1436 * schedule, and hope.
/* Turn an MDS reply into an inode: unpack the lustre_md from @req at
 * @offset, then either refresh the existing *inode or iget a new one on
 * @sb.  On iget failure the lsm unpacked here is freed before erroring. */
1443 int ll_prep_inode(struct obd_export *exp, struct inode **inode,
1444 struct ptlrpc_request *req, int offset,struct super_block *sb)
1446 struct lustre_md md;
1449 rc = mdc_req2lustre_md(req, offset, exp, &md);
/* Existing inode: just merge the new attributes. */
1454 ll_update_inode(*inode, md.body, md.lsm);
/* Otherwise instantiate a new inode from the unpacked md. */
1457 *inode = ll_iget(sb, md.body->ino, &md);
1458 if (*inode == NULL || is_bad_inode(*inode)) {
1459 /* free the lsm if we allocated one above */
1461 obd_free_memmd(exp, &md.lsm);
1463 CERROR("new_inode -fatal: rc %d\n", rc);
/* Two-letter labels for ll_async_page origins, indexed by LLAP_ORIGIN_*;
 * used in debug/proc output. */
1470 char *llap_origins[] = {
1471 [LLAP_ORIGIN_UNKNOWN] = "--",
1472 [LLAP_ORIGIN_READPAGE] = "rp",
1473 [LLAP_ORIGIN_READAHEAD] = "ra",
1474 [LLAP_ORIGIN_COMMIT_WRITE] = "cw",
1475 [LLAP_ORIGIN_WRITEPAGE] = "wp",
1478 struct ll_async_page *llite_pglist_next_llap(struct ll_sb_info *sbi,
1479 struct list_head *list)
1481 struct ll_async_page *llap;
1482 struct list_head *pos;
1484 list_for_each(pos, list) {
1485 if (pos == &sbi->ll_pglist)
1487 llap = list_entry(pos, struct ll_async_page, llap_pglist_item);
1488 if (llap->llap_page == NULL)