/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <obd.h>

#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical
 * "llite_" (var. "ll_") prefix.
 */
static struct kmem_cache *ll_thread_kmem;
struct kmem_cache *vvp_lock_kmem;
struct kmem_cache *vvp_object_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct kmem_cache *vvp_thread_kmem;
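/*
 * Slab caches for the per-thread, per-session and per-object state used by
 * the VVP layer; they are created in vvp_global_init() and destroyed in
 * vvp_global_fini() via lu_kmem_init()/lu_kmem_fini().
 */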
static struct lu_kmem_descr vvp_caches[] = {
        {
                .ckd_cache = &ll_thread_kmem,
                .ckd_name  = "ll_thread_kmem",
                .ckd_size  = sizeof(struct ll_thread_info),
        },
        {
                .ckd_cache = &vvp_lock_kmem,
                .ckd_name  = "vvp_lock_kmem",
                .ckd_size  = sizeof(struct vvp_lock),
        },
        {
                .ckd_cache = &vvp_object_kmem,
                .ckd_name  = "vvp_object_kmem",
                .ckd_size  = sizeof(struct vvp_object),
        },
        {
                .ckd_cache = &vvp_session_kmem,
                .ckd_name  = "vvp_session_kmem",
                .ckd_size  = sizeof(struct vvp_session)
        },
        {
                .ckd_cache = &vvp_thread_kmem,
                .ckd_name  = "vvp_thread_kmem",
                .ckd_size  = sizeof(struct vvp_thread_info),
        },
        {
                .ckd_cache = NULL
        }
};
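/*
 * lu_context_key init/fini callbacks: each pair allocates and frees the
 * per-thread (LCT_CL_THREAD) or per-session (LCT_SESSION) state that the
 * lu_env machinery attaches to a lu_context.
 */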
static void *ll_thread_key_init(const struct lu_context *ctx,
                                struct lu_context_key *key)
{
        struct ll_thread_info *lti;

        OBD_SLAB_ALLOC_PTR_GFP(lti, ll_thread_kmem, GFP_NOFS);
        if (lti == NULL)
                lti = ERR_PTR(-ENOMEM);

        return lti;
}

static void ll_thread_key_fini(const struct lu_context *ctx,
                               struct lu_context_key *key, void *data)
{
        struct ll_thread_info *lti = data;

        OBD_SLAB_FREE_PTR(lti, ll_thread_kmem);
}

struct lu_context_key ll_thread_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = ll_thread_key_init,
        .lct_fini = ll_thread_key_fini,
};
static void *vvp_session_key_init(const struct lu_context *ctx,
                                  struct lu_context_key *key)
{
        struct vvp_session *session;

        OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
        if (session == NULL)
                session = ERR_PTR(-ENOMEM);

        return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
                                 struct lu_context_key *key, void *data)
{
        struct vvp_session *session = data;

        OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}

struct lu_context_key vvp_session_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = vvp_session_key_init,
        .lct_fini = vvp_session_key_fini
};
static void *vvp_thread_key_init(const struct lu_context *ctx,
                                 struct lu_context_key *key)
{
        struct vvp_thread_info *vti;

        OBD_SLAB_ALLOC_PTR_GFP(vti, vvp_thread_kmem, GFP_NOFS);
        if (vti == NULL)
                vti = ERR_PTR(-ENOMEM);

        return vti;
}

static void vvp_thread_key_fini(const struct lu_context *ctx,
                                struct lu_context_key *key, void *data)
{
        struct vvp_thread_info *vti = data;

        OBD_SLAB_FREE_PTR(vti, vvp_thread_kmem);
}

struct lu_context_key vvp_thread_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = vvp_thread_key_init,
        .lct_fini = vvp_thread_key_fini,
};
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ll_thread_key, &vvp_session_key, &vvp_thread_key);

static const struct lu_device_operations vvp_lu_ops = {
        .ldo_object_alloc = vvp_object_alloc
};
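/*
 * Free a VVP device: finalize the cl_site (if the device was ever attached
 * to one), finalize the embedded cl_device, and return the next device in
 * the stack so the caller can keep unwinding it.
 */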
static struct lu_device *vvp_device_free(const struct lu_env *env,
                                         struct lu_device *d)
{
        struct vvp_device *vdv  = lu2vvp_dev(d);
        struct cl_site    *site = lu2cl_site(d->ld_site);
        struct lu_device  *next = cl2lu_dev(vdv->vdv_next);

        if (d->ld_site != NULL) {
                cl_site_fini(site);
                OBD_FREE_PTR(site);
        }

        cl_device_fini(lu2cl_dev(d));
        OBD_FREE_PTR(vdv);
        return next;
}
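/*
 * Allocate a VVP device together with its cl_site. On any failure the
 * partially initialized device is released through vvp_device_free() and an
 * ERR_PTR() is returned.
 */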
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg)
{
        struct vvp_device *vdv;
        struct lu_device  *lud;
        struct cl_site    *site;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(vdv);
        if (vdv == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lud = &vdv->vdv_cl.cd_lu_dev;
        cl_device_init(&vdv->vdv_cl, t);
        vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;

        OBD_ALLOC_PTR(site);
        if (site != NULL) {
                rc = cl_site_init(site, &vdv->vdv_cl);
                if (rc == 0)
                        rc = lu_site_init_finish(&site->cs_lu);
                else {
                        LASSERT(lud->ld_site == NULL);
                        CERROR("Cannot init lu_site, rc %d.\n", rc);
                        OBD_FREE_PTR(site);
                }
        } else
                rc = -ENOMEM;
        if (rc != 0) {
                vvp_device_free(env, lud);
                lud = ERR_PTR(rc);
        }
        RETURN(lud);
}
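/*
 * Connect the VVP device to the next layer of the client cl stack (normally
 * the LOV device) and take a reference on it.
 */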
static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
                           const char *name, struct lu_device *next)
{
        struct vvp_device *vdv;
        int rc;

        vdv = lu2vvp_dev(d);
        vdv->vdv_next = lu2cl_dev(next);

        LASSERT(d->ld_site != NULL && next->ld_type != NULL);
        next->ld_site = d->ld_site;
        rc = next->ld_type->ldt_ops->ldto_device_init(
                env, next, next->ld_type->ldt_name, NULL);
        if (rc == 0) {
                lu_device_get(next);
                lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
        }
        RETURN(rc);
}
static struct lu_device *vvp_device_fini(const struct lu_env *env,
                                         struct lu_device *d)
{
        return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
        .ldto_init         = vvp_type_init,
        .ldto_fini         = vvp_type_fini,

        .ldto_start        = vvp_type_start,
        .ldto_stop         = vvp_type_stop,

        .ldto_device_alloc = vvp_device_alloc,
        .ldto_device_free  = vvp_device_free,
        .ldto_device_init  = vvp_device_init,
        .ldto_device_fini  = vvp_device_fini,
};

struct lu_device_type vvp_device_type = {
        .ldt_tags     = LU_DEVICE_CL,
        .ldt_name     = LUSTRE_VVP_NAME,
        .ldt_ops      = &vvp_device_type_ops,
        .ldt_ctx_tags = LCT_CL_THREAD
};
/**
 * A mutex serializing calls to vvp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
int vvp_global_init(void)
{
        int rc;

        rc = lu_kmem_init(vvp_caches);
        if (rc != 0)
                return rc;

        rc = lu_device_type_init(&vvp_device_type);
        if (rc != 0)
                goto out_kmem;

        return 0;

out_kmem:
        lu_kmem_fini(vvp_caches);

        return rc;
}
void vvp_global_fini(void)
{
        lu_device_type_fini(&vvp_device_type);
        lu_kmem_fini(vvp_caches);
}
/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */
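/*
 * Build the cl-device stack for a client mount: set up a VVP device on top
 * of the data export's obd_lu_dev and record the resulting cl_device and
 * lu_site in the per-superblock ll_sb_info.
 */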
int cl_sb_init(struct super_block *sb)
{
        struct ll_sb_info *sbi;
        struct cl_device  *cl;
        struct lu_env     *env;
        int rc = 0;
        int refcheck;

        sbi = ll_s2sbi(sb);
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                cl = cl_type_setup(env, NULL, &vvp_device_type,
                                   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
                if (!IS_ERR(cl)) {
                        sbi->ll_cl = cl;
                        sbi->ll_site = cl2lu_dev(cl)->ld_site;
                }
                cl_env_put(env, &refcheck);
        } else
                rc = PTR_ERR(env);
        RETURN(rc);
}
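/*
 * Tear down the cl-device stack created by cl_sb_init() when the client is
 * unmounted.
 */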
int cl_sb_fini(struct super_block *sb)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        struct cl_device  *cld;
        int                refcheck;
        int                result;

        sbi = ll_s2sbi(sb);
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                cld = sbi->ll_cl;

                if (cld != NULL) {
                        cl_stack_fini(env, cld);
                        sbi->ll_cl = NULL;
                        sbi->ll_site = NULL;
                }
                cl_env_put(env, &refcheck);
                result = 0;
        } else {
                CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
                result = PTR_ERR(env);
        }
        RETURN(result);
}
/****************************************************************************
 *
 * /proc/fs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/

/*
 * To represent contents of a page cache as a byte stream, the following
 * information is encoded in a 64-bit offset:
 *
 *       - file hash bucket in lu_site::ls_hash[]       28 bits
 *
 *       - how far file is from bucket head              4 bits
 *
 *       - page index                                   32 bits
 *
 * The first two fields identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT   (32 + 4)
#define PGC_DEPTH_SHIFT (32)
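/*
 * Worked example (illustrative, not from the original source): with
 * PGC_DEPTH_SHIFT = 32 and PGC_OBJ_SHIFT = 36, bucket 5, depth 2 and page
 * index 7 pack into pos = (5ULL << 36) | (2ULL << 32) | 7 = 0x5200000007;
 * vvp_pgcache_id_unpack() recovers the same three fields from that value.
 */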
struct vvp_pgcache_id {
        unsigned                 vpi_bucket;
        unsigned                 vpi_depth;
        uint32_t                 vpi_index;

        unsigned                 vpi_curdep;
        struct lu_object_header *vpi_obj;
};
static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
        CLASSERT(sizeof(pos) == sizeof(__u64));

        id->vpi_index  = pos & 0xffffffff;
        id->vpi_depth  = (pos >> PGC_DEPTH_SHIFT) & 0xf;
        id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
}

static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
        return
                ((__u64)id->vpi_index) |
                ((__u64)id->vpi_depth  << PGC_DEPTH_SHIFT) |
                ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}
static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *data)
{
        struct vvp_pgcache_id   *id  = data;
        struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

        if (id->vpi_curdep-- > 0)
                return 0; /* continue */

        if (lu_object_is_dying(hdr))
                return 1;

        cfs_hash_get(hs, hnode);
        id->vpi_obj = hdr;
        return 1;
}
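/*
 * Find the object sitting at depth vpi_depth in hash bucket vpi_bucket of
 * the site's object hash, take a "dump" reference on its cl_object slice
 * and return it; NULL if the slot is empty or the object is being freed.
 */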
static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
                                         struct lu_device *dev,
                                         struct vvp_pgcache_id *id)
{
        LASSERT(lu_device_is_cl(dev));

        id->vpi_depth &= 0xf;
        id->vpi_obj    = NULL;
        id->vpi_curdep = id->vpi_depth;

        cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
                                vvp_pgcache_obj_get, id);
        if (id->vpi_obj != NULL) {
                struct lu_object *lu_obj;

                lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
                if (lu_obj != NULL) {
                        lu_object_ref_add(lu_obj, "dump", current);
                        return lu2cl(lu_obj);
                }
                lu_object_put(env, lu_object_top(id->vpi_obj));

        } else if (id->vpi_curdep > 0) {
                id->vpi_depth = 0xf;
        }
        return NULL;
}
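/*
 * Starting at the packed position "pos", find the next object/page pair
 * that actually has a page in the page cache and return its packed id, or
 * ~0ULL once the whole site has been scanned.
 */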
static loff_t vvp_pgcache_find(const struct lu_env *env,
                               struct lu_device *dev, loff_t pos)
{
        struct cl_object     *clob;
        struct lu_site       *site;
        struct vvp_pgcache_id id;

        site = dev->ld_site;
        vvp_pgcache_id_unpack(pos, &id);

        while (1) {
                if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
                        return ~0ULL;
                clob = vvp_pgcache_obj(env, dev, &id);
                if (clob != NULL) {
                        struct inode *inode = vvp_object_inode(clob);
                        struct page *vmpage;
                        int nr;

                        nr = find_get_pages_contig(inode->i_mapping,
                                                   id.vpi_index, 1, &vmpage);
                        if (nr > 0) {
                                id.vpi_index = vmpage->index;
                                /* Can't support over 16T file */
                                nr = !(vmpage->index > 0xffffffff);
                                put_page(vmpage);
                        }

                        lu_object_ref_del(&clob->co_lu, "dump", current);
                        cl_object_put(env, clob);
                        if (nr > 0)
                                return vvp_pgcache_id_pack(&id);
                }
                /* to the next object. */
                ++id.vpi_depth;
                id.vpi_depth &= 0xf;
                if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
                        return ~0ULL;
                id.vpi_index = 0;
        }
}
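/* Append the name of a set page flag to the seq_file output, '|'-separated. */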
#define seq_page_flag(seq, page, flag, has_flags) do {                  \
        if (test_bit(PG_##flag, &(page)->flags)) {                      \
                seq_printf(seq, "%s"#flag, has_flags ? "|" : "");       \
                has_flags = 1;                                          \
        }                                                               \
} while (0)
static void vvp_pgcache_page_show(const struct lu_env *env,
                                  struct seq_file *seq, struct cl_page *page)
{
        struct vvp_page *vpg;
        struct page     *vmpage;
        int              has_flags;

        vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
        vmpage = vpg->vpg_page;
        seq_printf(seq, " %5i | %p %p %s %s %s | %p "DFID"(%p) %lu %u [",
                   0 /* gen */,
                   vpg, page,
                   "none",
                   vpg->vpg_defer_uptodate ? "du" : "- ",
                   PageWriteback(vmpage) ? "wb" : "-",
                   vmpage,
                   PFID(ll_inode2fid(vmpage->mapping->host)),
                   vmpage->mapping->host, vmpage->index,
                   page_count(vmpage));
        has_flags = 0;
        seq_page_flag(seq, vmpage, locked, has_flags);
        seq_page_flag(seq, vmpage, error, has_flags);
        seq_page_flag(seq, vmpage, referenced, has_flags);
        seq_page_flag(seq, vmpage, uptodate, has_flags);
        seq_page_flag(seq, vmpage, dirty, has_flags);
        seq_page_flag(seq, vmpage, writeback, has_flags);
        seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}
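/*
 * seq_file ->show() callback: decode the current position, look up the
 * corresponding object and page, and print one line describing that page.
 */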
static int vvp_pgcache_show(struct seq_file *f, void *v)
{
        loff_t                 pos;
        struct ll_sb_info     *sbi;
        struct cl_object      *clob;
        struct lu_env         *env;
        struct vvp_pgcache_id  id;
        int                    refcheck;
        int                    result;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                pos = *(loff_t *)v;
                vvp_pgcache_id_unpack(pos, &id);
                sbi = f->private;
                clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
                if (clob != NULL) {
                        struct inode *inode = vvp_object_inode(clob);
                        struct cl_page *page = NULL;
                        struct page *vmpage;

                        result = find_get_pages_contig(inode->i_mapping,
                                                      id.vpi_index, 1, &vmpage);
                        if (result > 0) {
                                lock_page(vmpage);
                                page = cl_vmpage_page(vmpage, clob);
                                unlock_page(vmpage);

                                put_page(vmpage);
                        }

                        seq_printf(f, "%8x@"DFID": ", id.vpi_index,
                                   PFID(lu_object_fid(&clob->co_lu)));
                        if (page != NULL) {
                                vvp_pgcache_page_show(env, f, page);
                                cl_page_put(env, page);
                        } else
                                seq_puts(f, "missing\n");
                        lu_object_ref_del(&clob->co_lu, "dump", current);
                        cl_object_put(env, clob);
                } else
                        seq_printf(f, "%llx missing\n", pos);
                cl_env_put(env, &refcheck);
                result = 0;
        } else
                result = PTR_ERR(env);
        return result;
}
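/*
 * seq_file iterator ->start() and ->next(): advance the packed position
 * with vvp_pgcache_find(); a NULL return terminates the walk.
 */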
static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        int                refcheck;

        sbi = f->private;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
                        pos = ERR_PTR(-EFBIG);
                else {
                        *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
                                                *pos);
                        if (*pos == ~0ULL)
                                pos = NULL;
                }
                cl_env_put(env, &refcheck);
        }
        return pos;
}
static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        int                refcheck;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                sbi = f->private;
                *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
                if (*pos == ~0ULL)
                        pos = NULL;
                cl_env_put(env, &refcheck);
        }
        return pos;
}
static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}
static struct seq_operations vvp_pgcache_ops = {
        .start = vvp_pgcache_start,
        .next  = vvp_pgcache_next,
        .stop  = vvp_pgcache_stop,
        .show  = vvp_pgcache_show
};
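/*
 * Open handler for the dump_page_cache proc file: start the seq_file
 * iteration and stash the per-mount ll_sb_info where the iterator callbacks
 * expect to find it (seq->private).
 */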
static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
        struct ll_sb_info *sbi = PDE_DATA(inode);
        struct seq_file   *seq;
        int                result;

        result = seq_open(filp, &vvp_pgcache_ops);
        if (result == 0) {
                seq = filp->private_data;
                seq->private = sbi;
        }

        return result;
}
const struct file_operations vvp_dump_pgcache_file_ops = {
        .owner   = THIS_MODULE,
        .open    = vvp_dump_pgcache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};