/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"
#include "vvp_internal.h"
#include <linux/kallsyms.h>
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * The vvp_ prefix stands for "Vfs Vm Posix"; it corresponds to the
 * historical "llite_" (var. "ll_") prefix.
 */
static struct kmem_cache *ll_thread_kmem;
struct kmem_cache *vvp_object_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
static struct lu_kmem_descr vvp_caches[] = {
	{
		.ckd_cache = &ll_thread_kmem,
		.ckd_name  = "ll_thread_kmem",
		.ckd_size  = sizeof(struct ll_thread_info),
	},
	{
		.ckd_cache = &vvp_object_kmem,
		.ckd_name  = "vvp_object_kmem",
		.ckd_size  = sizeof(struct vvp_object),
	},
	{
		.ckd_cache = &vvp_session_kmem,
		.ckd_name  = "vvp_session_kmem",
		.ckd_size  = sizeof(struct vvp_session),
	},
	{
		.ckd_cache = &vvp_thread_kmem,
		.ckd_name  = "vvp_thread_kmem",
		.ckd_size  = sizeof(struct vvp_thread_info),
	},
	{
		.ckd_cache = NULL
	}
};
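/*
 * The caches above are created in vvp_global_init() via lu_kmem_init() and
 * released in vvp_global_fini(); the NULL-terminated table lets both walk
 * the same descriptor list. Each cache backs one of the lu_context_key
 * allocators that follow.
 */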
static void *ll_thread_key_init(const struct lu_context *ctx,
				struct lu_context_key *key)
{
	struct ll_thread_info *lti;

	OBD_SLAB_ALLOC_PTR_GFP(lti, ll_thread_kmem, GFP_NOFS);
	if (lti == NULL)
		lti = ERR_PTR(-ENOMEM);

	return lti;
}

static void ll_thread_key_fini(const struct lu_context *ctx,
			       struct lu_context_key *key, void *data)
{
	struct ll_thread_info *lti = data;

	OBD_SLAB_FREE_PTR(lti, ll_thread_kmem);
}

struct lu_context_key ll_thread_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ll_thread_key_init,
	.lct_fini = ll_thread_key_fini,
};
static void *vvp_session_key_init(const struct lu_context *ctx,
				  struct lu_context_key *key)
{
	struct vvp_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);

	return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
				 struct lu_context_key *key, void *data)
{
	struct vvp_session *session = data;

	OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}

struct lu_context_key vvp_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = vvp_session_key_init,
	.lct_fini = vvp_session_key_fini,
};
static void *vvp_thread_key_init(const struct lu_context *ctx,
				 struct lu_context_key *key)
{
	struct vvp_thread_info *vti;

	OBD_SLAB_ALLOC_PTR_GFP(vti, vvp_thread_kmem, GFP_NOFS);
	if (vti == NULL)
		vti = ERR_PTR(-ENOMEM);

	return vti;
}

static void vvp_thread_key_fini(const struct lu_context *ctx,
				struct lu_context_key *key, void *data)
{
	struct vvp_thread_info *vti = data;

	OBD_SLAB_FREE_PTR(vti, vvp_thread_kmem);
}

struct lu_context_key vvp_thread_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = vvp_thread_key_init,
	.lct_fini = vvp_thread_key_fini,
};
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ll_thread_key, &vvp_session_key, &vvp_thread_key);
static const struct lu_device_operations vvp_lu_ops = {
	.ldo_object_alloc = vvp_object_alloc,
};
static struct lu_device *vvp_device_free(const struct lu_env *env,
					 struct lu_device *d)
{
	struct vvp_device *vdv = lu2vvp_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->vdv_next);

	if (d->ld_site != NULL) {
		cl_site_fini(site);
		OBD_FREE_PTR(site);
	}

	cl_device_fini(lu2cl_dev(d));
	OBD_FREE_PTR(vdv);
	return next;
}
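/*
 * Allocate the vvp device together with its cl_site. Any failure is routed
 * through vvp_device_free() above, so partially initialized devices are torn
 * down in one place.
 */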
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
					  struct lu_device_type *t,
					  struct lustre_cfg *cfg)
{
	struct vvp_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;
	ENTRY;

	OBD_ALLOC_PTR(vdv);
	if (vdv == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	lud = &vdv->vdv_cl.cd_lu_dev;
	cl_device_init(&vdv->vdv_cl, t);
	vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;

	OBD_ALLOC_PTR(site);
	if (site != NULL) {
		rc = cl_site_init(site, &vdv->vdv_cl);
		if (rc == 0) {
			rc = lu_site_init_finish(&site->cs_lu);
		} else {
			LASSERT(lud->ld_site == NULL);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			OBD_FREE_PTR(site);
		}
	} else {
		rc = -ENOMEM;
	}
	if (rc != 0) {
		vvp_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	RETURN(lud);
}
static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
			   const char *name, struct lu_device *next)
{
	struct vvp_device *vdv;
	int rc;
	ENTRY;

	vdv = lu2vvp_dev(d);
	vdv->vdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(
		env, next, next->ld_type->ldt_name, NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	RETURN(rc);
}
static struct lu_device *vvp_device_fini(const struct lu_env *env,
					 struct lu_device *d)
{
	return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
	.ldto_init = vvp_type_init,
	.ldto_fini = vvp_type_fini,

	.ldto_start = vvp_type_start,
	.ldto_stop  = vvp_type_stop,

	.ldto_device_alloc = vvp_device_alloc,
	.ldto_device_free  = vvp_device_free,
	.ldto_device_init  = vvp_device_init,
	.ldto_device_fini  = vvp_device_fini,
};
struct lu_device_type vvp_device_type = {
	.ldt_tags     = LU_DEVICE_CL,
	.ldt_name     = LUSTRE_VVP_NAME,
	.ldt_ops      = &vvp_device_type_ops,
	.ldt_ctx_tags = LCT_CL_THREAD,
};
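/*
 * vvp_device_type is registered with lu_device_type_init() in
 * vvp_global_init() and later handed to cl_type_setup() by cl_sb_init(),
 * which builds the client stack for a mounted super block.
 */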
#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
unsigned int (*vvp_account_page_dirtied)(struct page *page,
					 struct address_space *mapping);

unsigned int ll_account_page_dirtied(struct page *page,
				     struct address_space *mapping)
{
	/* must use __set_page_dirty, which means unlocking and
	 * relocking, which hurts performance.
	 */
	ll_xa_unlock(&mapping->i_pages);
	__set_page_dirty(page, mapping, 0);
	ll_xa_lock(&mapping->i_pages);
	return 0;
}
#endif
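/*
 * vvp_global_init() below resolves the real account_page_dirtied() through
 * kallsyms at module load and falls back to ll_account_page_dirtied() when
 * the symbol cannot be found.
 */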
/*
 * A mutex serializing calls to vvp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
int vvp_global_init(void)
{
	int rc;

	rc = lu_kmem_init(vvp_caches);
	if (rc != 0)
		return rc;

	rc = lu_device_type_init(&vvp_device_type);
	if (rc != 0)
		goto out_kmem;

#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
	/*
	 * Kernel v5.2-5678-gac1c3e4 no longer exports account_page_dirtied
	 */
	vvp_account_page_dirtied = (void *)
		cfs_kallsyms_lookup_name("account_page_dirtied");
	if (!vvp_account_page_dirtied)
		vvp_account_page_dirtied = ll_account_page_dirtied;
#endif

	return 0;

out_kmem:
	lu_kmem_fini(vvp_caches);

	return rc;
}

void vvp_global_fini(void)
{
	lu_device_type_fini(&vvp_device_type);
	lu_kmem_fini(vvp_caches);
}
/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */
int cl_sb_init(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct cl_device *cl;
	struct lu_env *env;
	int rc = 0;
	__u16 refcheck;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cl = cl_type_setup(env, NULL, &vvp_device_type,
				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
		if (!IS_ERR(cl)) {
			sbi->ll_cl = cl;
			sbi->ll_site = cl2lu_dev(cl)->ld_site;
		}
		cl_env_put(env, &refcheck);
	} else {
		rc = PTR_ERR(env);
	}
	return rc;
}
int cl_sb_fini(struct super_block *sb)
{
	struct ll_sb_info *sbi;
	struct lu_env *env;
	struct cl_device *cld;
	__u16 refcheck;
	int result;

	sbi = ll_s2sbi(sb);
	env = cl_env_get(&refcheck);
	if (!IS_ERR(env)) {
		cld = sbi->ll_cl;
		if (cld != NULL) {
			cl_stack_fini(env, cld);
			sbi->ll_cl = NULL;
			sbi->ll_site = NULL;
		}
		cl_env_put(env, &refcheck);
		result = 0;
	} else {
		CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
		result = PTR_ERR(env);
	}
	return result;
}
/****************************************************************************
 *
 * debugfs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/
struct vvp_seq_private {
	struct ll_sb_info *vsp_sbi;
	struct lu_env *vsp_env;
	u16 vsp_refcheck;
	struct cl_object *vsp_clob;
	struct rhashtable_iter vsp_iter;
	u32 vsp_page_index;
	/*
	 * prev_pos is the 'pos' of the last object returned
	 * by ->start or ->next.
	 */
	loff_t vvp_prev_pos;
};
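/*
 * The iterator keeps a reference on vsp_clob between calls, so a partially
 * walked object stays pinned while the seq_file buffer is drained;
 * vsp_page_index tracks the next page offset to look up inside it.
 */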
static struct page *vvp_pgcache_current(struct vvp_seq_private *priv)
{
	struct lu_device *dev = &priv->vsp_sbi->ll_cl->cd_lu_dev;
	struct lu_object_header *h;
	struct page *vmpage = NULL;

	rhashtable_walk_start(&priv->vsp_iter);
	while ((h = rhashtable_walk_next(&priv->vsp_iter)) != NULL) {
		struct inode *inode;
		int nr;

		if (IS_ERR(h)) {
			if (PTR_ERR(h) == -EAGAIN)
				continue;
			break;
		}

		if (!priv->vsp_clob) {
			struct lu_object *lu_obj;

			lu_obj = lu_object_get_first(h, dev);
			if (!lu_obj)
				continue;

			priv->vsp_clob = lu2cl(lu_obj);
			lu_object_ref_add_atomic(lu_obj, "dump", current);
			priv->vsp_page_index = 0;
		}

		inode = vvp_object_inode(priv->vsp_clob);
		nr = find_get_pages_contig(inode->i_mapping,
					   priv->vsp_page_index, 1, &vmpage);
		if (nr > 0) {
			priv->vsp_page_index = vmpage->index;
			break;
		}

		lu_object_ref_del(&priv->vsp_clob->co_lu, "dump", current);
		cl_object_put(priv->vsp_env, priv->vsp_clob);
		priv->vsp_clob = NULL;
		priv->vsp_page_index = 0;
	}
	rhashtable_walk_stop(&priv->vsp_iter);
	return vmpage;
}
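/*
 * Print one "|"-separated name per set page flag; has_flags records whether
 * anything was printed so the caller can emit "-" when no flag is set.
 */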
#define seq_page_flag(seq, page, flag, has_flags) do {			\
	if (test_bit(PG_##flag, &(page)->flags)) {			\
		seq_printf(seq, "%s"#flag, has_flags ? "|" : "");	\
		has_flags = 1;						\
	}								\
} while (0)
static void vvp_pgcache_page_show(const struct lu_env *env,
				  struct seq_file *seq, struct cl_page *page)
{
	struct vvp_page *vpg;
	struct page *vmpage;
	int has_flags;

	vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
	vmpage = vpg->vpg_page;
	seq_printf(seq, " %5i | %p %p %s %s %s | %p "DFID"(%p) %lu %u [",
		   0 /* gen */,
		   vpg, page,
		   "none",
		   vpg->vpg_defer_uptodate ? "du" : "- ",
		   PageWriteback(vmpage) ? "wb" : "-",
		   vmpage,
		   PFID(ll_inode2fid(vmpage->mapping->host)),
		   vmpage->mapping->host, vmpage->index,
		   page_count(vmpage));
	has_flags = 0;
	seq_page_flag(seq, vmpage, locked, has_flags);
	seq_page_flag(seq, vmpage, error, has_flags);
	seq_page_flag(seq, vmpage, referenced, has_flags);
	seq_page_flag(seq, vmpage, uptodate, has_flags);
	seq_page_flag(seq, vmpage, dirty, has_flags);
	seq_page_flag(seq, vmpage, writeback, has_flags);
	seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}
static int vvp_pgcache_show(struct seq_file *f, void *v)
{
	struct vvp_seq_private *priv = f->private;
	struct page *vmpage = v;
	struct cl_page *page;

	seq_printf(f, "%8lx@" DFID ": ", vmpage->index,
		   PFID(lu_object_fid(&priv->vsp_clob->co_lu)));
	lock_page(vmpage);
	page = cl_vmpage_page(vmpage, priv->vsp_clob);
	unlock_page(vmpage);
	put_page(vmpage);

	if (page) {
		vvp_pgcache_page_show(priv->vsp_env, f, page);
		cl_page_put(priv->vsp_env, page);
	} else {
		seq_puts(f, "missing\n");
	}

	return 0;
}
static void vvp_pgcache_rewind(struct vvp_seq_private *priv)
{
	if (priv->vvp_prev_pos) {
		struct lu_site *s = priv->vsp_sbi->ll_cl->cd_lu_dev.ld_site;

		rhashtable_walk_exit(&priv->vsp_iter);
		rhashtable_walk_enter(&s->ls_obj_hash, &priv->vsp_iter);
		priv->vvp_prev_pos = 0;
		if (priv->vsp_clob) {
			lu_object_ref_del(&priv->vsp_clob->co_lu, "dump",
					  current);
			cl_object_put(priv->vsp_env, priv->vsp_clob);
		}
		priv->vsp_clob = NULL;
	}
}
static struct page *vvp_pgcache_next_page(struct vvp_seq_private *priv)
{
	priv->vsp_page_index += 1;
	return vvp_pgcache_current(priv);
}
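/*
 * seq_file may call ->start again for a position it has already been given,
 * e.g. when ->show() overflowed the output buffer; vvp_prev_pos lets ->start
 * tell a retry of the current item apart from an advance by one.
 */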
static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
	struct vvp_seq_private *priv = f->private;

	if (*pos == 0) {
		vvp_pgcache_rewind(priv);
	} else if (*pos == priv->vvp_prev_pos) {
		/* Return the current item */;
	} else {
		WARN_ON(*pos != priv->vvp_prev_pos + 1);
		priv->vsp_page_index += 1;
	}

	priv->vvp_prev_pos = *pos;
	return vvp_pgcache_current(priv);
}
static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct vvp_seq_private *priv = f->private;

	WARN_ON(*pos != priv->vvp_prev_pos);
	*pos += 1;
	priv->vvp_prev_pos = *pos;
	return vvp_pgcache_next_page(priv);
}
static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
static const struct seq_operations vvp_pgcache_ops = {
	.start = vvp_pgcache_start,
	.next  = vvp_pgcache_next,
	.stop  = vvp_pgcache_stop,
	.show  = vvp_pgcache_show,
};
static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
	struct vvp_seq_private *priv;
	struct lu_site *s;

	priv = __seq_open_private(filp, &vvp_pgcache_ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->vsp_sbi = inode->i_private;
	priv->vsp_env = cl_env_get(&priv->vsp_refcheck);
	priv->vsp_clob = NULL;
	if (IS_ERR(priv->vsp_env)) {
		int err = PTR_ERR(priv->vsp_env);

		seq_release_private(inode, filp);
		return err;
	}

	s = priv->vsp_sbi->ll_cl->cd_lu_dev.ld_site;
	rhashtable_walk_enter(&s->ls_obj_hash, &priv->vsp_iter);

	return 0;
}
static int vvp_dump_pgcache_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct vvp_seq_private *priv = seq->private;

	if (priv->vsp_clob) {
		lu_object_ref_del(&priv->vsp_clob->co_lu, "dump", current);
		cl_object_put(priv->vsp_env, priv->vsp_clob);
	}

	cl_env_put(priv->vsp_env, &priv->vsp_refcheck);
	rhashtable_walk_exit(&priv->vsp_iter);
	return seq_release_private(inode, file);
}
const struct file_operations vvp_dump_pgcache_file_ops = {
	.owner   = THIS_MODULE,
	.open    = vvp_dump_pgcache_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = vvp_dump_pgcache_seq_release,
};
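/*
 * The table above is wired up by llite's debugfs setup; with debugfs at its
 * usual mount point the dump can be read with something like (the exact
 * directory name varies with the mount):
 *
 *	cat /sys/kernel/debug/lustre/llite/<fsname>-<instance>/dump_page_cache
 */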