/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal interfaces of OSC layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */
#ifndef OSC_CL_INTERNAL_H
#define OSC_CL_INTERNAL_H

#include <libcfs/libcfs.h>
#include <obd.h>
/* osc_build_res_name() */
#include <cl_object.h>
#include "osc_internal.h"
/** \defgroup osc osc
 *  @{
 */

struct osc_extent;

/**
 * State maintained by osc layer for each IO context.
 */
struct osc_io {
	/** super class */
	struct cl_io_slice oi_cl;
	/** true if this io is lockless. */
	unsigned int       oi_lockless:1,
	/** true if this io is counted as active IO */
			   oi_is_active:1;
	/** how many LRU pages are reserved for this IO */
	unsigned long      oi_lru_reserved;

	/** active extents; we know how many bytes are going to be written,
	 * so holding an active extent prevents it from being fragmented */
	struct osc_extent *oi_active;
	/** partially truncated extent; we need to hold this extent to
	 * prevent page writeback from happening */
	struct osc_extent *oi_trunc;
	/** write osc_lock for this IO, used by osc_extent_find() */
	struct osc_lock   *oi_write_osclock;
	struct obdo        oi_oa;
	struct osc_async_cbargs {
		bool              opc_rpc_sent;
		int               opc_rc;
		struct completion opc_sync;
	} oi_cbarg;
};
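/*
 * Illustrative note (an addition, not part of the original header): there is
 * one osc_io per thread per system call; it is reached through the session
 * context, e.g.
 *
 *	struct osc_io *oio = osc_env_io(env);
 *
 * using the osc_env_io() accessor declared further below.
 */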
/**
 * State maintained by osc layer for the duration of a system call.
 */
struct osc_session {
	struct osc_io os_io;
};

#define OTI_PVEC_SIZE 256
struct osc_thread_info {
	struct ldlm_res_id      oti_resname;
	union ldlm_policy_data  oti_policy;
	struct cl_lock_descr    oti_descr;
	struct cl_attr          oti_attr;
	struct lustre_handle    oti_handle;
	struct cl_page_list     oti_plist;
	struct cl_io            oti_io;
	void                   *oti_pvec[OTI_PVEC_SIZE];
	/**
	 * Fields used by cl_lock_discard_pages().
	 */
	pgoff_t                 oti_next_index;
	pgoff_t                 oti_fn_index; /* first non-overlapped index */
	struct cl_sync_io       oti_anchor;
	struct cl_req_attr      oti_req_attr;
	struct lu_buf           oti_ladvise_buf;
};
struct osc_object {
	struct cl_object        oo_cl;
	struct lov_oinfo       *oo_oinfo;
	/**
	 * True if locking against this stripe got -EUSERS.
	 */
	int                     oo_contended;
	cfs_time_t              oo_contention_time;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
	/**
	 * IO context used for invariant checks in osc_lock_has_pages().
	 */
	struct cl_io            oo_debug_io;
	/** Serialization object for osc_object::oo_debug_io. */
	struct mutex            oo_debug_mutex;
#endif
	/**
	 * used by the osc to keep track of what objects to build into rpcs.
	 * Protected by client_obd->cli_loi_list_lock.
	 */
	struct list_head        oo_ready_item;
	struct list_head        oo_hp_ready_item;
	struct list_head        oo_write_item;
	struct list_head        oo_read_item;

	/**
	 * Red-black tree managing the (async) dirty-page extents.
	 */
	struct rb_root          oo_root;
	/**
	 * Lists managing write (dirty) extents.
	 */
	struct list_head        oo_hp_exts;     /* list of hp extents */
	struct list_head        oo_urgent_exts; /* list of writeback extents */
	struct list_head        oo_full_exts;

	struct list_head        oo_reading_exts;

	atomic_t                oo_nr_reads;
	atomic_t                oo_nr_writes;

	/** Protects the extent tree. Will also be used to protect
	 * oo_{read|write}_pages soon. */
	spinlock_t              oo_lock;

	/**
	 * Radix tree for caching pages.
	 */
	spinlock_t              oo_tree_lock;
	struct radix_tree_root  oo_tree;
	unsigned long           oo_npages;

	/* Protects the list of osc_locks this osc_object holds. */
	struct list_head        oo_ol_list;
	spinlock_t              oo_ol_spin;

	/** number of active IOs of this object */
	atomic_t                oo_nr_ios;
	wait_queue_head_t       oo_io_waitq;
};
static inline void osc_object_lock(struct osc_object *obj)
{
	spin_lock(&obj->oo_lock);
}

static inline int osc_object_trylock(struct osc_object *obj)
{
	return spin_trylock(&obj->oo_lock);
}

static inline void osc_object_unlock(struct osc_object *obj)
{
	spin_unlock(&obj->oo_lock);
}

static inline int osc_object_is_locked(struct osc_object *obj)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return spin_is_locked(&obj->oo_lock);
#else
	/*
	 * It is not perfect to return true all the time.
	 * But since this function is only used for assertion
	 * and checking, it seems OK.
	 */
	return 1;
#endif
}
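/*
 * Usage sketch (an addition, not part of the original header): callers
 * bracket extent-tree manipulation with the helpers above, e.g.
 *
 *	osc_object_lock(obj);
 *	... walk or modify obj->oo_root ...
 *	osc_object_unlock(obj);
 *
 * osc_object_is_locked() is intended for assertion-style checks only: on
 * !SMP builds without spinlock debugging it always returns 1.
 */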
/**
 * Lock "micro-states" for osc layer.
 */
enum osc_lock_state {
	OLS_NEW,
	OLS_ENQUEUE_WAITING,
	OLS_ENQUEUED,
	OLS_UPCALL_RECEIVED,
	OLS_GRANTED,
	OLS_CANCELLED
};
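/*
 * Illustrative progression (an addition, not part of the original header):
 * a lock normally moves OLS_NEW -> OLS_ENQUEUED -> OLS_UPCALL_RECEIVED ->
 * OLS_GRANTED as the enqueue completes, ending in OLS_CANCELLED once the
 * underlying DLM lock is cancelled; OLS_ENQUEUE_WAITING covers the case
 * where the enqueue must first wait for a conflicting lock.
 */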
/**
 * osc-private state of cl_lock.
 *
 * Interaction with DLM.
 *
 * Once the receive upcall is invoked, osc_lock remembers a handle of DLM lock
 * in osc_lock::ols_handle and a pointer to that lock in
 * osc_lock::ols_dlmlock.
 *
 * This pointer is protected through a reference, acquired by
 * osc_lock_upcall0(). Also, an additional reference is acquired by
 * ldlm_lock_addref() call protecting the lock from cancellation, until
 * osc_lock_unuse() releases it.
 *
 * Below is a description of how lock references are acquired and released
 * inside of DLM.
 *
 * - When a new lock is created and enqueued to the server
 *   (ldlm_cli_enqueue())
 *      - ldlm_lock_create()
 *          - ldlm_lock_new(): initializes a lock with 2 references. One for
 *            the caller (released when reply from the server is received, or
 *            on error), and another for the hash table.
 *      - ldlm_lock_addref_internal(): protects the lock from cancellation.
 *
 * - When a reply is received from the server (osc_enqueue_interpret())
 *      - ldlm_cli_enqueue_fini()
 *          - LDLM_LOCK_PUT(): releases caller reference acquired by
 *            ldlm_lock_new().
 *          - if (rc != 0)
 *                ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
 *      - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
 *
 * - When the lock is being cancelled (ldlm_lock_cancel())
 *      - ldlm_lock_destroy()
 *          - LDLM_LOCK_PUT(): releases hash-table reference acquired by
 *            ldlm_lock_new().
 *
 * osc_lock is detached from ldlm_lock by osc_lock_detach(), which is called
 * either when the lock is cancelled (osc_lock_blocking()), or when the lock
 * is deleted without cancellation (e.g., from cl_locks_prune()). In the
 * latter case the ldlm lock remains in memory, and can be re-attached to
 * osc_lock in the future.
 */
struct osc_lock {
	struct cl_lock_slice     ols_cl;
	/** Internal lock to protect states, etc. */
	spinlock_t               ols_lock;
	/** Owner sleeps on this channel for state change */
	struct cl_sync_io       *ols_owner;
	/** waiting list for this lock to be cancelled */
	struct list_head         ols_waiting_list;
	/** wait entry of ols_waiting_list */
	struct list_head         ols_wait_entry;
	/** list entry for osc_object::oo_ol_list */
	struct list_head         ols_nextlock_oscobj;

	/** underlying DLM lock */
	struct ldlm_lock        *ols_dlmlock;
	/** DLM flags with which osc_lock::ols_lock was enqueued */
	__u64                    ols_flags;
	/** osc_lock::ols_lock handle */
	struct lustre_handle     ols_handle;
	struct ldlm_enqueue_info ols_einfo;
	enum osc_lock_state      ols_state;
	/** lock value block */
	struct ost_lvb           ols_lvb;

	/**
	 * true, if ldlm_lock_addref() was called against
	 * osc_lock::ols_lock. This is used for sanity checking.
	 *
	 * \see osc_lock::ols_has_ref
	 */
	unsigned                 ols_hold :1,
	/**
	 * this is much like osc_lock::ols_hold, except that this bit is
	 * cleared _after_ the reference is released in osc_lock_unuse().
	 * This fine distinction is needed because:
	 *
	 *     - if the ldlm lock still has a reference, osc_ast_data_get()
	 *       needs to return the associated cl_lock (so a flag is needed
	 *       that is cleared after ldlm_lock_decref() returned), and
	 *
	 *     - ldlm_lock_decref() can invoke a blocking ast (for a
	 *       LDLM_FL_CBPENDING lock), and osc_lock functions like
	 *       osc_lock_cancel() called from there need to know whether to
	 *       release the lock reference (so a flag is needed that is
	 *       cleared before ldlm_lock_decref() is called).
	 */
	                         ols_has_ref:1,
	/**
	 * inherit the lockless attribute from top level cl_io.
	 * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
	 */
	                         ols_locklessable:1,
	/**
	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we
	 * treat the EVAVAIL error as tolerable; this makes the upper logic
	 * happy to wait for all glimpse locks to all OSTs to complete.
	 * A glimpse lock converts to a normal lock if the server lock is
	 * granted.
	 * A glimpse lock should be destroyed immediately after use.
	 */
	                         ols_glimpse:1,
	/**
	 * For async glimpse lock.
	 */
	                         ols_agl:1;
};
/**
 * Page state private for osc layer.
 */
struct osc_page {
	struct cl_page_slice  ops_cl;
	/**
	 * Page queues used by osc to detect when RPC can be formed.
	 */
	struct osc_async_page ops_oap;
	/**
	 * An offset within page from which next transfer starts. This is used
	 * by cl_page_clip() to submit partial page transfers.
	 */
	int                   ops_from;
	/**
	 * An offset within page at which next transfer ends.
	 *
	 * \see osc_page::ops_from.
	 */
	int                   ops_to;
	/**
	 * Boolean, true iff page is under transfer. Used for sanity checking.
	 */
	unsigned              ops_transfer_pinned:1,
	/**
	 * in LRU?
	 */
	                      ops_in_lru:1,
	/**
	 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
	 */
	                      ops_srvlock:1;
	/**
	 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
	 */
	struct list_head      ops_lru;
	/**
	 * Submit time - the time when the page is starting RPC. For
	 * debugging.
	 */
	cfs_time_t            ops_submit_time;
};
extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;

extern struct lu_device_type osc_device_type;
extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;

#define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
int osc_lock_init(const struct lu_env *env,
		  struct cl_object *obj, struct cl_lock *lock,
		  const struct cl_io *io);
int osc_io_init(const struct lu_env *env,
		struct cl_object *obj, struct cl_io *io);
struct lu_object *osc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t ind);

void osc_index2policy(union ldlm_policy_data *policy,
		      const struct cl_object *obj, pgoff_t start, pgoff_t end);
int osc_lvb_print(const struct lu_env *env, void *cookie,
		  lu_printer_t p, const struct ost_lvb *lvb);

void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags);
int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
			__u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
			struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
		       struct osc_page *ops);
int osc_page_cache_add(const struct lu_env *env,
		       const struct cl_page_slice *slice, struct cl_io *io);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
			    struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
			 struct osc_page *ops);
int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
			 struct list_head *list, int cmd, int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
			     __u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
			      pgoff_t start, pgoff_t end, int hp, int discard);
int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
			 pgoff_t start, pgoff_t end);
void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
		   struct osc_object *osc);
int lru_queue_work(const struct lu_env *env, void *data);

void osc_object_set_contended(struct osc_object *obj);
void osc_object_clear_contended(struct osc_object *obj);
int osc_object_is_contended(struct osc_object *obj);

int osc_lock_is_lockless(const struct osc_lock *olck);
/*****************************************************************************
 *
 * Accessors.
 *
 */
static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
{
	struct osc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &osc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct osc_session *osc_env_session(const struct lu_env *env)
{
	struct osc_session *ses;

	ses = lu_context_key_get(env->le_ses, &osc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct osc_io *osc_env_io(const struct lu_env *env)
{
	return &osc_env_session(env)->os_io;
}
static inline int osc_is_object(const struct lu_object *obj)
{
	return obj->lo_dev->ld_type == &osc_device_type;
}

static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
{
	LINVRNT(d->ld_type == &osc_device_type);
	return container_of0(d, struct osc_device, od_cl.cd_lu_dev);
}

static inline struct obd_export *osc_export(const struct osc_object *obj)
{
	return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
}

static inline struct client_obd *osc_cli(const struct osc_object *obj)
{
	return &osc_export(obj)->exp_obd->u.cli;
}

static inline struct osc_object *cl2osc(const struct cl_object *obj)
{
	LINVRNT(osc_is_object(&obj->co_lu));
	return container_of0(obj, struct osc_object, oo_cl);
}

static inline struct cl_object *osc2cl(const struct osc_object *obj)
{
	return (struct cl_object *)&obj->oo_cl;
}
static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
	LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
	if (mode == CLM_READ)
		return LCK_PR;
	if (mode == CLM_WRITE)
		return LCK_PW;
	return LCK_GROUP;
}

static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
	LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
	if (mode == LCK_PR)
		return CLM_READ;
	if (mode == LCK_PW)
		return CLM_WRITE;
	return CLM_GROUP;
}
static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
{
	LINVRNT(osc_is_object(&slice->cpl_obj->co_lu));
	return container_of0(slice, struct osc_page, ops_cl);
}

static inline struct osc_page *oap2osc(struct osc_async_page *oap)
{
	return container_of0(oap, struct osc_page, ops_oap);
}

static inline pgoff_t osc_index(struct osc_page *opg)
{
	return opg->ops_cl.cpl_index;
}

static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
	return oap2osc(oap)->ops_cl.cpl_page;
}

static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
{
	return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
}

static inline struct osc_page *
osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
{
	const struct cl_page_slice *slice;

	LASSERT(osc != NULL);
	slice = cl_object_page_slice(&osc->oo_cl, page);
	return cl2osc_page(slice);
}

static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
	LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
	return container_of0(slice, struct osc_lock, ols_cl);
}

static inline struct osc_lock *osc_lock_at(const struct cl_lock *lock)
{
	return cl2osc_lock(cl_lock_at(lock, &osc_device_type));
}

static inline int osc_io_srvlock(struct osc_io *oio)
{
	return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
}
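/*
 * Conversion sketch (an addition, not part of the original header): the
 * container_of()-style helpers above move between layers, e.g. for a
 * cl_object known to live on an osc device:
 *
 *	struct osc_object *osc = cl2osc(obj);
 *	struct cl_object  *clo = osc2cl(osc);
 *	struct client_obd *cli = osc_cli(osc);
 */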
enum osc_extent_state {
	OES_INV       = 0, /** extent is just initialized or destroyed */
	OES_ACTIVE    = 1, /** process is using this extent */
	OES_CACHE     = 2, /** extent is ready for IO */
	OES_LOCKING   = 3, /** locking page to prepare IO */
	OES_LOCK_DONE = 4, /** locking finished, ready to send */
	OES_RPC       = 5, /** in RPC */
	OES_TRUNC     = 6, /** being truncated */
	OES_STATE_MAX
};
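/*
 * Illustrative life cycle (an addition, not part of the original header):
 * a write extent typically moves OES_ACTIVE (a process is adding pages) ->
 * OES_CACHE (released with dirty pages) -> OES_LOCKING -> OES_LOCK_DONE ->
 * OES_RPC (request in flight), returning to OES_INV once written out;
 * OES_TRUNC is entered while truncate is operating on the extent.
 */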
/**
 * osc_extent data to manage dirty pages.
 * osc_extent has the following attributes:
 * 1. all pages in the same extent must go in one RPC in write back;
 * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
 * 3. must be covered by only 1 osc_lock;
 * 4. exclusive. It's impossible to have overlapped osc_extents.
 *
 * The lifetime of an extent is from when the 1st page is dirtied to when
 * all pages inside it are written out.
 *
 * LOCKING ORDER
 * =============
 * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
 */
struct osc_extent {
	/** red-black tree node */
	struct rb_node          oe_node;
	/** osc_object of this extent */
	struct osc_object      *oe_obj;
	/** refcount, removed from red-black tree if reaches zero. */
	atomic_t                oe_refc;
	/** busy if non-zero */
	atomic_t                oe_users;
	/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
	struct list_head        oe_link;
	/** state of this extent */
	enum osc_extent_state   oe_state;
	/** flags for this extent. */
	unsigned int            oe_intree:1,
	/** 0 is write, 1 is read */
	                        oe_rw:1,
	/** sync extent, queued by osc_queue_sync_pages() */
	                        oe_sync:1,
	/** set if this extent has partial, sync pages.
	 * Extents with partial page(s) can't merge with others in RPC */
	                        oe_no_merge:1,
	                        oe_srvlock:1,
	                        oe_memalloc:1,
	/** an ACTIVE extent is going to be truncated, so when this extent
	 * is released, it will turn into TRUNC state instead of CACHE. */
	                        oe_trunc_pending:1,
	/** this extent should be written asap and someone may wait for the
	 * write to finish. This bit is usually set along with urgent if
	 * the extent was in CACHE state.
	 * An fsync_wait extent can't be merged because a new extent region
	 * may exceed the fsync range. */
	                        oe_fsync_wait:1,
	/** covering lock is being canceled */
	                        oe_hp:1,
	/** this extent should be written back asap. Set if one of the pages
	 * is requested by the page WB daemon, or by sync write or read
	 * requests. */
	                        oe_urgent:1;
	/** how many grants were allocated for this extent; there is no grant
	 * allocated for reading extents and sync write extents. */
	unsigned int            oe_grants;
	/** # of dirty pages in this extent */
	unsigned int            oe_nr_pages;
	/** list of pending oap pages. Pages in this list are NOT sorted. */
	struct list_head        oe_pages;
	/** Since an extent has to be written out atomically, this is used to
	 * remember the next page that needs to be locked to write this
	 * extent out. Not used right now.
	 */
	struct osc_page        *oe_next_page;
	/** start and end index of this extent, including start and end
	 * themselves. Page offset here is the page index of osc_pages.
	 * oe_start is used as the key for the red-black tree. */
	pgoff_t                 oe_start;
	pgoff_t                 oe_end;
	/** maximum ending index of this extent; this is limited by
	 * max_pages_per_rpc, lock extent and chunk size. */
	pgoff_t                 oe_max_end;
	/** waitqueue - for those who want to be notified if this extent's
	 * state has changed. */
	wait_queue_head_t       oe_waitq;
	/** lock covering this extent */
	struct ldlm_lock       *oe_dlmlock;
	/** terminator of this extent. Must be true if this extent is in IO. */
	struct task_struct     *oe_owner;
	/** return value of writeback. If somebody is waiting for this extent,
	 * this value can be known by the outside world. */
	int                     oe_rc;
	/** max pages per rpc when this extent was created */
	unsigned int            oe_mppr;
};
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
		      int sent, int rc);
int osc_extent_release(const struct lu_env *env, struct osc_extent *ext);

int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
			   pgoff_t start, pgoff_t end, enum cl_lock_mode mode);

typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
				 struct osc_page *, void *);
int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
			 struct osc_object *osc, pgoff_t start, pgoff_t end,
			 osc_page_gang_cbt cb, void *cbdata);
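/*
 * Usage sketch for osc_page_gang_lookup() (an addition, not part of the
 * original header): callers walk the pages cached in [start, end] of an
 * object, applying a callback to each batch, e.g.
 *
 *	rc = osc_page_gang_lookup(env, io, osc, start, end, cb, cbdata);
 *
 * where cb is a caller-supplied osc_page_gang_cbt; osc_lock_discard_pages()
 * above is one such user.
 */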
/** @} osc */

#endif /* OSC_CL_INTERNAL_H */