/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2013, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal definitions for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H

#include <lustre/lustre_idl.h>
#include <cl_object.h>

enum obd_notify_event;
struct inode;
struct lov_stripe_md;
struct lustre_md;
struct obd_capa;
struct obd_device;
struct obd_export;
struct page;
blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}
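
/*
 * Usage sketch (illustrative; example_getattr() is a hypothetical caller,
 * not part of this header). cl_glimpse_size() glimpses the cluster-wide
 * size from the OSTs, so that a subsequent i_size_read(inode) is
 * trustworthy:
 *
 * \code
 *	static int example_getattr(struct inode *inode)
 *	{
 *		int rc;
 *
 *		rc = cl_glimpse_size(inode);
 *		if (rc != 0)
 *			return rc;
 *		// i_size_read(inode) now reflects the glimpsed size
 *		return 0;
 *	}
 * \endcode
 */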
/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};
/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iovec *cui_iov;
	unsigned long cui_nrsegs;
	/**
	 * Total iov count for the remaining IO.
	 */
	unsigned long cui_tot_nrsegs;
	/**
	 * Old length for an iov that was truncated partially.
	 */
	size_t cui_iov_olen;
	/**
	 * Total size of the remaining IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
		struct {
			struct cl_page_list cui_queue;
			unsigned long cui_written;
			int cui_from;
			int cui_to;
		} write;
	} u;
	/**
	 * Layout version when this IO is initialized
	 */
	__u32 cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb *cui_iocb;
};
/**
 * True if \a io is a normal io, false for splice_{read,write};
 * must be implemented in arch-specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

extern struct kmem_cache *vvp_object_kmem;
struct ccc_thread_info {
	struct cl_lock cti_lock;
	struct cl_lock_descr cti_descr;
	struct cl_io cti_io;
	struct cl_attr cti_attr;
};
static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info != NULL);

	return info;
}

static inline struct cl_lock *ccc_env_lock(const struct lu_env *env)
{
	struct cl_lock *lock = &ccc_env_info(env)->cti_lock;

	memset(lock, 0, sizeof(*lock));
	return lock;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}
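
/*
 * Usage sketch (illustrative): the cti_* members above are per-thread
 * scratch objects, letting hot paths run an io without allocating one.
 * A hypothetical caller would do:
 *
 * \code
 *	struct cl_io *io = ccc_env_thread_io(env);
 *	int rc;
 *
 *	io->ci_obj = clob;
 *	rc = cl_io_init(env, io, CIT_MISC, clob);
 *	if (rc == 0) {
 *		// ... drive the io ...
 *	}
 *	cl_io_fini(env, io);
 * \endcode
 */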
struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses != NULL);

	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}
/**
 * ccc-private object state.
 */
struct vvp_object {
	struct cl_object_header vob_header;
	struct cl_object vob_cl;
	struct inode *vob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see vvp_page::vpg_pending_linkage
	 */
	struct list_head vob_pending_list;

	/**
	 * Number of transient pages. This is no longer protected by i_sem,
	 * and needs to be atomic. This is not actually used for anything,
	 * and can probably be removed.
	 */
	atomic_t vob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t vob_mmap_cnt;

	/**
	 * If pages belonging to this object are discarded when a client
	 * is evicted, some debug info is printed. This flag is set while
	 * processing the first discarded page, to avoid flooding the logs
	 * with one message per discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int vob_discard_page_warned:1;
};
/**
 * VVP-private page state.
 */
struct vvp_page {
	struct cl_page_slice vpg_cl;
	unsigned int vpg_defer_uptodate:1,
		     vpg_ra_used:1,
		     vpg_write_queued:1;
	/**
	 * Non-empty iff this page is already counted in
	 * vvp_object::vob_pending_list. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head vpg_pending_linkage;
	/** VM page */
	struct page *vpg_page;
};
static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct vvp_page, vpg_cl);
}

static inline pgoff_t vvp_index(struct vvp_page *vpg)
{
	return vpg->vpg_cl.cpl_index;
}
struct vvp_device {
	struct cl_device vdv_cl;
	struct super_block *vdv_sb;
	struct cl_device *vdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};
void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx, struct lu_context_key *key,
		  void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, struct cl_sync_io *anchor);
void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, obd_valid flags);
static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
{
	return &vdv->vdv_cl.cd_lu_dev;
}

static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
{
	return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
}

static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
{
	return container_of0(d, struct vvp_device, vdv_cl);
}

static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
{
	return container_of0(obj, struct vvp_object, vob_cl);
}

static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
{
	return container_of0(obj, struct vvp_object, vob_cl.co_lu);
}

static inline struct inode *vvp_object_inode(const struct cl_object *obj)
{
	return cl2vvp(obj)->vob_inode;
}

int vvp_object_invariant(const struct cl_object *obj);
struct vvp_object *cl_inode2vvp(struct inode *inode);

static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2vvp_page(slice)->vpg_page;
}
struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice);
struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa);

int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr)					\
	do {								\
		if (unlikely(!(expr))) {				\
			LU_OBJECT_DEBUG(D_ERROR, (env), &(clob)->co_lu,	\
					#expr "\n");			\
			LINVRNT(0);					\
		}							\
	} while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
#endif /* CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
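
/*
 * Example use (sketch, mirroring callers in the vvp code): assert the
 * object invariant; with expensive checks disabled this compiles away.
 *
 * \code
 *	CLOBINVRNT(env, clob, vvp_object_invariant(clob));
 * \endcode
 */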
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);
struct ccc_grouplock {
	struct lu_env *cg_env;
	struct cl_io *cg_io;
	struct cl_lock *cg_lock;
	unsigned long cg_gid;
};

int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);
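
/*
 * Usage sketch (illustrative; error handling trimmed): group locks are
 * taken and released as a get/put pair, with the lock, io and env kept
 * bundled in the ccc_grouplock until the put:
 *
 * \code
 *	struct ccc_grouplock cg;
 *	int rc;
 *
 *	rc = cl_get_grouplock(clob, gid, nonblock, &cg);
 *	if (rc == 0) {
 *		// ... perform group-locked IO ...
 *		cl_put_grouplock(&cg);
 *	}
 * \endcode
 */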
/**
 * New interfaces to get and put lov_stripe_md from the lov layer. This
 * violates layering, because lov_stripe_md is supposed to be private data
 * of the lov layer.
 *
 * NB: If you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
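
/*
 * Usage sketch (illustrative): the get/put pair brackets any access to
 * the striping information; the pointer must not be cached past the put:
 *
 * \code
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		// ... inspect striping under the reference ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 * \endcode
 */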
enum {
	LUSTRE_OPC_MKDIR = 0,
	LUSTRE_OPC_SYMLINK = 1,
	LUSTRE_OPC_MKNOD = 2,
	LUSTRE_OPC_CREATE = 3,
	LUSTRE_OPC_ANY = 5
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);

extern const struct file_operations vvp_dump_pgcache_file_ops;

#endif /* VVP_INTERNAL_H */