LU-5971 llite: rename ccc_device to vvp_device
[fs/lustre-release.git] lustre/llite/vvp_dev.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl_device and cl_device_type implementation for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE


#include <obd.h>
#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

/*
 * The vvp_ prefix stands for "Vfs Vm Posix".  It corresponds to the
 * historical "llite_" (also "ll_") prefix.
 */

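/*
 * Slab caches for the per-thread (vvp_thread_info) and per-session
 * (vvp_session) context state used by the lu_context_key callbacks below;
 * they are registered through lu_kmem_init(vvp_caches) in vvp_global_init()
 * and released again in vvp_global_fini().
 */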
static struct kmem_cache *vvp_thread_kmem;
static struct kmem_cache *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
        {
                .ckd_cache = &vvp_thread_kmem,
                .ckd_name  = "vvp_thread_kmem",
                .ckd_size  = sizeof (struct vvp_thread_info),
        },
        {
                .ckd_cache = &vvp_session_kmem,
                .ckd_name  = "vvp_session_kmem",
                .ckd_size  = sizeof (struct vvp_session)
        },
        {
                .ckd_cache = NULL
        }
};

static void *vvp_key_init(const struct lu_context *ctx,
                          struct lu_context_key *key)
{
        struct vvp_thread_info *info;

        OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, GFP_NOFS);
        if (info == NULL)
                info = ERR_PTR(-ENOMEM);
        return info;
}

static void vvp_key_fini(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data)
{
        struct vvp_thread_info *info = data;
        OBD_SLAB_FREE_PTR(info, vvp_thread_kmem);
}

static void *vvp_session_key_init(const struct lu_context *ctx,
                                  struct lu_context_key *key)
{
        struct vvp_session *session;

        OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, GFP_NOFS);
        if (session == NULL)
                session = ERR_PTR(-ENOMEM);
        return session;
}

static void vvp_session_key_fini(const struct lu_context *ctx,
                                 struct lu_context_key *key, void *data)
{
        struct vvp_session *session = data;
        OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
}


struct lu_context_key vvp_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = vvp_key_init,
        .lct_fini = vvp_key_fini
};

struct lu_context_key vvp_session_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = vvp_session_key_init,
        .lct_fini = vvp_session_key_fini
};

/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);

static const struct lu_device_operations vvp_lu_ops = {
        .ldo_object_alloc      = vvp_object_alloc
};

static const struct cl_device_operations vvp_cl_ops = {
        .cdo_req_init = ccc_req_init
};

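/*
 * Free a vvp_device: release its cl_site (if the device was ever attached
 * to one), finalize the underlying cl_device and return the next device in
 * the stack to the caller.  Also used by vvp_device_alloc() to unwind its
 * error paths.
 */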
static struct lu_device *vvp_device_free(const struct lu_env *env,
                                         struct lu_device *d)
{
        struct vvp_device *vdv  = lu2vvp_dev(d);
        struct cl_site    *site = lu2cl_site(d->ld_site);
        struct lu_device  *next = cl2lu_dev(vdv->vdv_next);

        if (d->ld_site != NULL) {
                cl_site_fini(site);
                OBD_FREE_PTR(site);
        }

        cl_device_fini(lu2cl_dev(d));
        OBD_FREE_PTR(vdv);
        return next;
}

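/*
 * Allocate a vvp_device together with its cl_site.  Any failure after
 * cl_device_init() is unwound through vvp_device_free() above.
 */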
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
                                          struct lu_device_type *t,
                                          struct lustre_cfg *cfg)
{
        struct vvp_device *vdv;
        struct lu_device *lud;
        struct cl_site *site;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(vdv);
        if (vdv == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        lud = &vdv->vdv_cl.cd_lu_dev;
        cl_device_init(&vdv->vdv_cl, t);
        vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
        vdv->vdv_cl.cd_ops = &vvp_cl_ops;

        OBD_ALLOC_PTR(site);
        if (site != NULL) {
                rc = cl_site_init(site, &vdv->vdv_cl);
                if (rc == 0)
                        rc = lu_site_init_finish(&site->cs_lu);
                else {
                        LASSERT(lud->ld_site == NULL);
                        CERROR("Cannot init lu_site, rc %d.\n", rc);
                        OBD_FREE_PTR(site);
                }
        } else
                rc = -ENOMEM;
        if (rc != 0) {
                vvp_device_free(env, lud);
                lud = ERR_PTR(rc);
        }
        RETURN(lud);
}

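/*
 * Link this device to the next layer of the client stack: the lu_site is
 * propagated to the next device, which is then initialized and referenced
 * under the "lu-stack" tag.
 */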
static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
                           const char *name, struct lu_device *next)
{
        struct vvp_device  *vdv;
        int rc;
        ENTRY;

        vdv = lu2vvp_dev(d);
        vdv->vdv_next = lu2cl_dev(next);

        LASSERT(d->ld_site != NULL && next->ld_type != NULL);
        next->ld_site = d->ld_site;
        rc = next->ld_type->ldt_ops->ldto_device_init(
                env, next, next->ld_type->ldt_name, NULL);
        if (rc == 0) {
                lu_device_get(next);
                lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
        }
        RETURN(rc);
}

static struct lu_device *vvp_device_fini(const struct lu_env *env,
                                         struct lu_device *d)
{
        return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}

static const struct lu_device_type_operations vvp_device_type_ops = {
        .ldto_init = vvp_type_init,
        .ldto_fini = vvp_type_fini,

        .ldto_start = vvp_type_start,
        .ldto_stop  = vvp_type_stop,

        .ldto_device_alloc      = vvp_device_alloc,
        .ldto_device_free       = vvp_device_free,
        .ldto_device_init       = vvp_device_init,
        .ldto_device_fini       = vvp_device_fini,
};

struct lu_device_type vvp_device_type = {
        .ldt_tags     = LU_DEVICE_CL,
        .ldt_name     = LUSTRE_VVP_NAME,
        .ldt_ops      = &vvp_device_type_ops,
        .ldt_ctx_tags = LCT_CL_THREAD
};

/**
 * Global initialization of the vvp layer: register the vvp slab caches and
 * perform the common client (ccc) global setup for vvp_device_type.
 */
int vvp_global_init(void)
{
        int result;

        result = lu_kmem_init(vvp_caches);
        if (result == 0) {
                result = ccc_global_init(&vvp_device_type);
                if (result != 0)
                        lu_kmem_fini(vvp_caches);
        }
        return result;
}

void vvp_global_fini(void)
{
        ccc_global_fini(&vvp_device_type);
        lu_kmem_fini(vvp_caches);
}


/*****************************************************************************
 *
 * mirror obd-devices into cl devices.
 *
 */

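/*
 * Called at mount time: build the client cl-device stack on top of the data
 * export's obd device and record the resulting cl_device and lu_site in the
 * superblock info.
 */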
int cl_sb_init(struct super_block *sb)
{
        struct ll_sb_info *sbi;
        struct cl_device  *cl;
        struct lu_env     *env;
        int rc = 0;
        int refcheck;

        sbi  = ll_s2sbi(sb);
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                cl = cl_type_setup(env, NULL, &vvp_device_type,
                                   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
                if (!IS_ERR(cl)) {
                        cl2vvp_dev(cl)->vdv_sb = sb;
                        sbi->ll_cl = cl;
                        sbi->ll_site = cl2lu_dev(cl)->ld_site;
                }
                cl_env_put(env, &refcheck);
        } else
                rc = PTR_ERR(env);
        RETURN(rc);
}

int cl_sb_fini(struct super_block *sb)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        struct cl_device  *cld;
        int                refcheck;
        int                result;

        ENTRY;
        sbi = ll_s2sbi(sb);
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                cld = sbi->ll_cl;

                if (cld != NULL) {
                        cl_stack_fini(env, cld);
                        sbi->ll_cl = NULL;
                        sbi->ll_site = NULL;
                }
                cl_env_put(env, &refcheck);
                result = 0;
        } else {
                CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
                result = PTR_ERR(env);
        }

        RETURN(result);
}

/****************************************************************************
 *
 * /proc/fs/lustre/llite/$MNT/dump_page_cache
 *
 ****************************************************************************/

/*
 * To represent the contents of the page cache as a byte stream, the
 * following information is encoded in the 64-bit offset:
 *
 *       - file hash bucket in lu_site::ls_hash[]       28bits
 *
 *       - how far file is from bucket head              4bits
 *
 *       - page index                                   32bits
 *
 * The first two fields identify a file in the cache uniquely.
 */

#define PGC_OBJ_SHIFT (32 + 4)
#define PGC_DEPTH_SHIFT (32)

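/*
 * Example (illustrative values): bucket 0x3, depth 0x2 and page index 0x10
 * pack into
 *
 *      pos = (0x3ULL << PGC_OBJ_SHIFT) | (0x2ULL << PGC_DEPTH_SHIFT) | 0x10
 *          = 0x3200000010
 *
 * and vvp_pgcache_id_unpack() recovers the three fields from that value.
 */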
struct vvp_pgcache_id {
        unsigned                 vpi_bucket;
        unsigned                 vpi_depth;
        uint32_t                 vpi_index;

        unsigned                 vpi_curdep;
        struct lu_object_header *vpi_obj;
};

static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
{
        CLASSERT(sizeof(pos) == sizeof(__u64));

        id->vpi_index  = pos & 0xffffffff;
        id->vpi_depth  = (pos >> PGC_DEPTH_SHIFT) & 0xf;
        id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
}

static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
{
        return
                ((__u64)id->vpi_index) |
                ((__u64)id->vpi_depth  << PGC_DEPTH_SHIFT) |
                ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}

static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                               struct hlist_node *hnode, void *data)
{
        struct vvp_pgcache_id   *id  = data;
        struct lu_object_header *hdr = cfs_hash_object(hs, hnode);

        if (id->vpi_curdep-- > 0)
                return 0; /* continue */

        if (lu_object_is_dying(hdr))
                return 1;

        cfs_hash_get(hs, hnode);
        id->vpi_obj = hdr;
        return 1;
}

static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
                                         struct lu_device *dev,
                                         struct vvp_pgcache_id *id)
{
        LASSERT(lu_device_is_cl(dev));

        id->vpi_depth &= 0xf;
        id->vpi_obj    = NULL;
        id->vpi_curdep = id->vpi_depth;

        cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
                                vvp_pgcache_obj_get, id);
        if (id->vpi_obj != NULL) {
                struct lu_object *lu_obj;

                lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
                if (lu_obj != NULL) {
                        lu_object_ref_add(lu_obj, "dump", current);
                        return lu2cl(lu_obj);
                }
                lu_object_put(env, lu_object_top(id->vpi_obj));

        } else if (id->vpi_curdep > 0) {
                id->vpi_depth = 0xf;
        }
        return NULL;
}

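/*
 * Starting at pos, find the next position that names an object with at
 * least one page present in the page cache; returns ~0ULL once the scan is
 * exhausted.
 */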
static loff_t vvp_pgcache_find(const struct lu_env *env,
                               struct lu_device *dev, loff_t pos)
{
        struct cl_object     *clob;
        struct lu_site       *site;
        struct vvp_pgcache_id id;

        site = dev->ld_site;
        vvp_pgcache_id_unpack(pos, &id);

        while (1) {
                if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
                        return ~0ULL;
                clob = vvp_pgcache_obj(env, dev, &id);
                if (clob != NULL) {
                        struct inode *inode = ccc_object_inode(clob);
                        struct page *vmpage;
                        int nr;

                        nr = find_get_pages_contig(inode->i_mapping,
                                                   id.vpi_index, 1, &vmpage);
                        if (nr > 0) {
                                id.vpi_index = vmpage->index;
                                /* Can't support files over 16TB */
                                nr = !(vmpage->index > 0xffffffff);
                                page_cache_release(vmpage);
                        }

                        lu_object_ref_del(&clob->co_lu, "dump", current);
                        cl_object_put(env, clob);
                        if (nr > 0)
                                return vvp_pgcache_id_pack(&id);
                }
                /* to the next object. */
                ++id.vpi_depth;
                id.vpi_depth &= 0xf;
                if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
                        return ~0ULL;
                id.vpi_index = 0;
        }
}

#define seq_page_flag(seq, page, flag, has_flags) do {                  \
        if (test_bit(PG_##flag, &(page)->flags)) {                      \
                seq_printf(seq, "%s"#flag, has_flags ? "|" : "");       \
                has_flags = 1;                                          \
        }                                                               \
} while(0)

static void vvp_pgcache_page_show(const struct lu_env *env,
                                  struct seq_file *seq, struct cl_page *page)
{
        struct ccc_page *cpg;
        struct page     *vmpage;
        int              has_flags;

        cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
        vmpage = cpg->cpg_page;
        seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
                   0 /* gen */,
                   cpg, page,
                   "none",
                   cpg->cpg_write_queued ? "wq" : "- ",
                   cpg->cpg_defer_uptodate ? "du" : "- ",
                   PageWriteback(vmpage) ? "wb" : "-",
                   vmpage,
                   PFID(ll_inode2fid(vmpage->mapping->host)),
                   vmpage->mapping->host, vmpage->index,
                   page_count(vmpage));
        has_flags = 0;
        seq_page_flag(seq, vmpage, locked, has_flags);
        seq_page_flag(seq, vmpage, error, has_flags);
        seq_page_flag(seq, vmpage, referenced, has_flags);
        seq_page_flag(seq, vmpage, uptodate, has_flags);
        seq_page_flag(seq, vmpage, dirty, has_flags);
        seq_page_flag(seq, vmpage, writeback, has_flags);
        seq_printf(seq, "%s]\n", has_flags ? "" : "-");
}

static int vvp_pgcache_show(struct seq_file *f, void *v)
{
        loff_t                   pos;
        struct ll_sb_info       *sbi;
        struct cl_object        *clob;
        struct lu_env           *env;
        struct vvp_pgcache_id    id;
        int                      refcheck;
        int                      result;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                pos = *(loff_t *) v;
                vvp_pgcache_id_unpack(pos, &id);
                sbi = f->private;
                clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
                if (clob != NULL) {
                        struct inode *inode = ccc_object_inode(clob);
                        struct cl_page *page = NULL;
                        struct page *vmpage;

                        result = find_get_pages_contig(inode->i_mapping,
                                                      id.vpi_index, 1, &vmpage);
                        if (result > 0) {
                                lock_page(vmpage);
                                page = cl_vmpage_page(vmpage, clob);
                                unlock_page(vmpage);

                                page_cache_release(vmpage);
                        }

                        seq_printf(f, "%8x@"DFID": ", id.vpi_index,
                                   PFID(lu_object_fid(&clob->co_lu)));
                        if (page != NULL) {
                                vvp_pgcache_page_show(env, f, page);
                                cl_page_put(env, page);
                        } else
                                seq_puts(f, "missing\n");
                        lu_object_ref_del(&clob->co_lu, "dump", current);
                        cl_object_put(env, clob);
                } else
                        seq_printf(f, "%llx missing\n", pos);
                cl_env_put(env, &refcheck);
                result = 0;
        } else
                result = PTR_ERR(env);
        return result;
}

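/*
 * seq_file iterator for dump_page_cache: ->start and ->next advance *pos to
 * the next packed vvp_pgcache_id that has a cached page (via
 * vvp_pgcache_find()), and ->show decodes *pos and prints one line per page.
 */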
static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        int                refcheck;

        sbi = f->private;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                sbi = f->private;
                if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
                        pos = ERR_PTR(-EFBIG);
                else {
                        *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
                                                *pos);
                        if (*pos == ~0ULL)
                                pos = NULL;
                }
                cl_env_put(env, &refcheck);
        }
        return pos;
}

static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
        struct ll_sb_info *sbi;
        struct lu_env     *env;
        int                refcheck;

        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
                sbi = f->private;
                *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
                if (*pos == ~0ULL)
                        pos = NULL;
                cl_env_put(env, &refcheck);
        }
        return pos;
}

static void vvp_pgcache_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static struct seq_operations vvp_pgcache_ops = {
        .start = vvp_pgcache_start,
        .next  = vvp_pgcache_next,
        .stop  = vvp_pgcache_stop,
        .show  = vvp_pgcache_show
};

static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
{
        struct ll_sb_info       *sbi = PDE_DATA(inode);
        struct seq_file         *seq;
        int                      result;

        result = seq_open(filp, &vvp_pgcache_ops);
        if (result == 0) {
                seq = filp->private_data;
                seq->private = sbi;
        }
        return result;
}

const struct file_operations vvp_dump_pgcache_file_ops = {
        .owner   = THIS_MODULE,
        .open    = vvp_dump_pgcache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};