/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2013, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Internal definitions for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H

#include <cl_object.h>

enum obd_notify_event;

/**
 * IO state private to the VVP layer.
 */
struct vvp_io {
	/** super class */
	struct cl_io_slice	vui_cl;
	struct cl_io_lock_link	vui_link;
	/** I/O vector to or from which the read/write is going */
	struct iov_iter		*vui_iter;
	/** total size of the I/O still left to do */
	size_t			vui_tot_count;

	union {
		struct vvp_fault_io {
			struct vm_area_struct	*ft_vma;
			/** locked page returned from vvp_io */
			struct page		*ft_vmpage;
			/** kernel fault info */
			struct vm_fault		*ft_vmf;
			/** bitflags from the fault API, used for the return code */
			unsigned int		 ft_flags;
			/** set when ft_flags holds flags returned by filemap_fault() */
			bool			 ft_flags_valid;
			struct cl_page_list	 ft_queue;
		} fault;
		struct {
			struct cl_page_list	 vui_queue;
			unsigned long		 vui_written;
			unsigned long		 vui_read;
		} readwrite; /* normal I/O */
	} u;

	/** layout version at the time this I/O was initialized */
	__u32			vui_layout_gen;
	/** file descriptor against which the I/O is done */
	struct ll_file_data	*vui_fd;
	struct kiocb		*vui_iocb;

	/* Readahead state. */
	pgoff_t			vui_ra_start_idx;
	pgoff_t			vui_ra_pages;
	/* Set when vui_ra_start_idx and vui_ra_pages have been initialized. */
	bool			vui_ra_valid;
};
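
/*
 * Device type, lu_context keys and the vvp_object slab cache provided by
 * the VVP layer.
 */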
extern struct lu_device_type vvp_device_type;

extern struct lu_context_key vvp_session_key;
extern struct lu_context_key vvp_thread_key;

extern struct kmem_cache *vvp_object_kmem;

struct vvp_thread_info {
	struct cl_lock		vti_lock;
	struct cl_lock_descr	vti_descr;
	struct cl_io		vti_io;
	struct cl_attr		vti_attr;
	struct cl_sync_io	vti_anchor;
};

static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
{
	struct vvp_thread_info *vti;

	vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
	LASSERT(vti != NULL);
	return vti;
}

static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
{
	struct cl_lock *lock = &vvp_env_info(env)->vti_lock;

	memset(lock, 0, sizeof(*lock));
	return lock;
}

static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &vvp_env_info(env)->vti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &vvp_env_info(env)->vti_io;

	memset(io, 0, sizeof(*io));
	return io;
}
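
/*
 * Typical usage (sketch only, not code taken from this tree): callers pick
 * up the pre-allocated, zeroed per-thread cl_io rather than allocating
 * their own, e.g.
 *
 *	struct cl_io *io = vvp_env_thread_io(env);
 *
 *	io->ci_obj = obj;
 *	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
 *	...
 *	cl_io_fini(env, io);
 */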

struct vvp_session {
	struct vvp_io vs_ios;
};

static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{
	struct vvp_session *ses;

	ses = lu_context_key_get(env->le_ses, &vvp_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{
	return &vvp_env_session(env)->vs_ios;
}

/**
 * VVP-private object state.
 */
struct vvp_object {
	struct cl_object_header	vob_header;
	struct cl_object	vob_cl;
	struct inode		*vob_inode;

	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t		vob_mmap_cnt;

	/**
	 * vob_discard_page_warned
	 *
	 * If pages belonging to this object are discarded when a client is
	 * evicted, some debug info is printed.  This flag is set while the
	 * first discarded page is handled, so that the message is not
	 * repeated for every subsequent discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		vob_discard_page_warned:1;
};

/**
 * VVP-private page state.
 */
struct vvp_page {
	struct cl_page_slice	vpg_cl;
	unsigned		vpg_defer_uptodate:1,
				vpg_ra_updated:1,
				vpg_ra_used:1;
	/** VM page */
	struct page		*vpg_page;
};

static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct vvp_page, vpg_cl);
}

static inline pgoff_t vvp_index(struct vvp_page *vpg)
{
	return vpg->vpg_page->index;
}
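
/**
 * VVP-private device state.  vdv_next is assumed to point at the cl_device
 * of the layer stacked below VVP.
 */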
struct vvp_device {
	struct cl_device	 vdv_cl;
	struct cl_device	*vdv_next;
};

static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
{
	return &vdv->vdv_cl.cd_lu_dev;
}

static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
{
	return container_of_safe(d, struct vvp_device, vdv_cl.cd_lu_dev);
}

static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
{
	return container_of_safe(d, struct vvp_device, vdv_cl);
}

static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
{
	return container_of_safe(obj, struct vvp_object, vob_cl);
}

static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
{
	return container_of_safe(obj, struct vvp_object, vob_cl.co_lu);
}

static inline struct inode *vvp_object_inode(const struct cl_object *obj)
{
	return cl2vvp(obj)->vob_inode;
}

int vvp_object_invariant(const struct cl_object *obj);
struct vvp_object *cl_inode2vvp(struct inode *inode);

static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2vvp_page(slice)->vpg_page;
}
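
/*
 * CLOBINVRNT() checks a cl_object invariant.  The expression is only
 * evaluated when expensive debugging is configured; otherwise the macro
 * reduces to a no-op that still type-checks its arguments via sizeof.
 */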
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr)					\
	do {								\
		if (unlikely(!(expr))) {				\
			LU_OBJECT_DEBUG(D_ERROR, (env), &(clob)->co_lu,	\
					#expr "\n");			\
			LINVRNT(0);					\
		}							\
	} while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
#endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */

int lov_read_and_clear_async_rc(struct cl_object *clob);

int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);

int vvp_global_init(void);
void vvp_global_fini(void);
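
/*
 * Fallback for kernels that do not export account_page_dirtied(): the llite
 * code is assumed to resolve this pointer at run time (the exact mechanism
 * is kernel-version dependent).
 */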
#ifndef HAVE_ACCOUNT_PAGE_DIRTIED_EXPORT
extern unsigned int (*vvp_account_page_dirtied)(struct page *page,
						struct address_space *mapping);
#endif

extern const struct file_operations vvp_dump_pgcache_file_ops;

#endif /* VVP_INTERNAL_H */