/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Definitions shared between vvp and liblustre, and other clients in the
 * future.
 *
 *   Author: Oleg Drokin <oleg.drokin@sun.com>
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LCLIENT_H
#define LCLIENT_H
#include <lustre/lustre_idl.h>
#include <cl_object.h>

enum obd_notify_event;
struct inode;

struct lov_stripe_md;
struct lustre_md;

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);
static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}
static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}
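
/*
 * Illustrative sketch, not part of the original header: cl_glimpse_size()
 * refreshes the cached inode size by taking a glimpse lock, while cl_agl()
 * requests the same information asynchronously (AGL). A getattr-style
 * caller (the "inode" and "rc" locals are assumed) would typically do:
 *
 *	int rc = cl_glimpse_size(inode);
 *	if (rc == 0)
 *		... i_size_read(inode) is now up to date ...
 */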

/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by server */
	SETATTR_NOLOCK,
	/** Extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** Existing local extent lock is used */
	SETATTR_MATCH_LOCK
};

/**
 * IO state private to vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice     cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iovec *cui_iov;
	unsigned long cui_nrsegs;
	/**
	 * Total iov count for left IO.
	 */
	unsigned long cui_tot_nrsegs;
	/**
	 * Old length for iov that was truncated partially.
	 */
	size_t cui_iov_olen;
	/**
	 * Total size for the left IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
		struct {
			struct cl_page_list cui_queue;
			unsigned long	    cui_written;
		} write;
	} u;
	/**
	 * True iff io is processing glimpse right now.
	 */
	int cui_glimpse;
	/**
	 * Layout version when this IO is initialized
	 */
	__u32 cui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb *cui_iocb;
};

/**
 * True if \a io is a normal io, false for splice_{read,write};
 * must be implemented in arch-specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock_descr cti_descr;
	struct cl_io	     cti_io;
	struct cl_attr	     cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}
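
/*
 * Illustrative sketch, not part of the original header: the cti_* fields
 * above are per-thread scratch space, so callers borrow a pre-zeroed cl_io
 * instead of allocating one, then drive it through the generic
 * cl_io_init()/cl_io_loop()/cl_io_fini() entry points from cl_object.h
 * (the "clob" object pointer is an assumed local):
 *
 *	struct cl_io *io = ccc_env_thread_io(env);
 *	int rc;
 *
 *	io->ci_obj = clob;
 *	rc = cl_io_init(env, io, CIT_MISC, clob);
 *	if (rc == 0)
 *		rc = cl_io_loop(env, io);
 *	cl_io_fini(env, io);
 */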

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}

/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header cob_header;
	struct cl_object	cob_cl;
	struct inode	       *cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head	cob_pending_list;

	/**
	 * Access to this counter is protected by inode->i_sem. Now that
	 * the lifetime of transient pages must be covered by the inode
	 * semaphore, we don't need to hold any other lock.
	 */
	int			cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t		cob_mmap_cnt;

	/**
	 * cob_discard_page_warned
	 *     If pages belonging to this object are discarded when a client
	 * is evicted, some debug info is printed. This flag is set while
	 * processing the first discarded page, so that the debug log is not
	 * flooded with a message for every discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice cpg_cl;
	unsigned	cpg_defer_uptodate:1,
			cpg_ra_used:1,
			cpg_write_queued:1;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head cpg_pending_linkage;
	/** VM page */
	struct page	*cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}

static inline pgoff_t ccc_index(struct ccc_page *ccc)
{
	return ccc->cpg_cl.cpl_index;
}
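
/*
 * Illustrative sketch, not part of the original header: a page method that
 * receives a cl_page_slice can reach the ccc_page, the backing VM page and
 * the page index through the helpers above (the "slice" parameter is an
 * assumed argument of such a method):
 *
 *	struct ccc_page *cp     = cl2ccc_page(slice);
 *	struct page	*vmpage = cp->cpg_page;
 *	pgoff_t		 index  = ccc_index(cp);
 */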

struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);

struct ccc_device {
	struct cl_device    cdv_cl;
	struct super_block *cdv_sb;
	struct cl_device   *cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice crq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
		   struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data);

int ccc_device_init(const struct lu_env *env,
		    struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
void ccc_transient_page_verify(const struct cl_page *page);
int ccc_transient_page_own(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *io, int nonblock);
void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *io);
void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *io);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io);
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state);

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
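
/*
 * Illustrative sketch, not part of the original header: an io layer's lock
 * method typically funnels into ccc_io_one_lock() to enqueue a single
 * extent lock covering the byte range it is about to touch. A whole-file
 * read lock (the "result" local is assumed; CLM_READ and OBD_OBJECT_EOF
 * come from the included headers) would look like:
 *
 *	result = ccc_io_one_lock(env, io, 0, CLM_READ, 0, OBD_OBJECT_EOF);
 */
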
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, obd_valid flags);

struct lu_device  *ccc2lu_dev(struct ccc_device *vdv);
struct lu_object  *ccc2lu(struct ccc_object *vob);
struct ccc_device *lu2ccc_dev(const struct lu_device *d);
struct ccc_device *cl2ccc_dev(const struct cl_device *d);
struct ccc_object *lu2ccc(const struct lu_object *obj);
struct ccc_object *cl2ccc(const struct cl_object *obj);
struct ccc_lock   *cl2ccc_lock(const struct cl_lock_slice *slice);
struct ccc_io     *cl2ccc_io(const struct lu_env *env,
			     const struct cl_io_slice *slice);
struct ccc_req    *cl2ccc_req(const struct cl_req_slice *slice);
struct page       *cl2vm_page(const struct cl_page_slice *slice);
struct inode      *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc(struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
# define CLOBINVRNT(env, clob, expr)					      \
	do {								      \
		if (unlikely(!(expr))) {				      \
			LU_OBJECT_DEBUG(D_ERROR, (env), &(clob)->co_lu, #expr "\n"); \
			LINVRNT(0);					      \
		}							      \
	} while (0)
#else /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
# define CLOBINVRNT(env, clob, expr)					      \
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
#endif /* !CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK */
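
/*
 * Illustrative sketch, not part of the original header: CLOBINVRNT() is an
 * expensive-check assertion. Object methods typically assert the ccc object
 * invariant (ccc_object_invariant(), declared above) on entry:
 *
 *	CLOBINVRNT(env, clob, ccc_object_invariant(clob));
 *
 * When CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is off, the macro merely "uses"
 * its arguments via sizeof, so no code is generated and no unused-variable
 * warnings are triggered.
 */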

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env	*cg_env;
	struct cl_io	*cg_io;
	struct cl_lock	*cg_lock;
	unsigned long	 cg_gid;
};

int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);
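
/*
 * Illustrative sketch, not part of the original header: group locks are
 * taken and released in matched pairs, e.g. when handling a group-lock
 * ioctl (the "clob", "gid" and "nonblock" locals are assumed):
 *
 *	struct ccc_grouplock cg;
 *	int rc;
 *
 *	rc = cl_get_grouplock(clob, gid, nonblock, &cg);
 *	if (rc == 0) {
 *		... perform group IO ...
 *		cl_put_grouplock(&cg);
 *	}
 */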

/**
 * New interfaces to get and put lov_stripe_md from lov layer. This violates
 * layering because lov_stripe_md is supposed to be private data of the lov
 * layer.
 *
 * NB: if you find you have to use these interfaces for your new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
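
/*
 * Illustrative sketch, not part of the original header: every lsm "get"
 * must be balanced by the matching "put" once the caller is done with the
 * stripe metadata, e.g.:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		... inspect striping ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 */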

/**
 * Data structure managing a client's cached pages. It maintains a count of
 * "unstable" pages and an LRU of clean pages; "unstable" pages are pages
 * pinned by the ptlrpc layer for recovery purposes.
 */
struct cl_client_cache {
	/**
	 * # of users (OSCs)
	 */
	atomic_t		ccc_users;
	/**
	 * # of threads doing shrinking
	 */
	unsigned int		ccc_lru_shrinkers;
	/**
	 * # of LRU entries available
	 */
	atomic_long_t		ccc_lru_left;
	/**
	 * List of entities (OSCs) for this LRU cache
	 */
	struct list_head	ccc_lru;
	/**
	 * Max # of LRU entries
	 */
	unsigned long		ccc_lru_max;
	/**
	 * Lock to protect ccc_lru list
	 */
	spinlock_t		ccc_lru_lock;
	/**
	 * Set if unstable check is enabled
	 */
	unsigned int		ccc_unstable_check:1;
	/**
	 * # of unstable pages for this mount point
	 */
	atomic_long_t		ccc_unstable_nr;
	/**
	 * Waitq for awaiting unstable pages to reach zero.
	 * Used at umounting time and signaled on BRW commit.
	 */
	wait_queue_head_t	ccc_unstable_waitq;
};
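
/*
 * Illustrative sketch, not part of the original header: the unstable-page
 * accounting above lets umount wait until the ptlrpc layer has unpinned
 * every page before the cache goes away (the "cache" pointer is an assumed
 * local):
 *
 *	wait_event(cache->ccc_unstable_waitq,
 *		   atomic_long_read(&cache->ccc_unstable_nr) == 0);
 */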

enum {
	LUSTRE_OPC_MKDIR	= 0,
	LUSTRE_OPC_SYMLINK	= 1,
	LUSTRE_OPC_MKNOD	= 2,
	LUSTRE_OPC_CREATE	= 3,
	LUSTRE_OPC_ANY		= 5,
};

enum {
	CLI_SET_MEA	= 1 << 0,
	CLI_RM_ENTRY	= 1 << 1,
	CLI_HASH64	= 1 << 2,
	CLI_API32	= 1 << 3,
	CLI_MIGRATE	= 1 << 4,
};

#endif /* LCLIENT_H */