4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 * lustre/include/lustre_osc.h
34 * OSC layer structures and methods common for both OSC and MDC.
36 * This file contains OSC interfaces used by OSC and MDC. Most of them
37 * were just moved from lustre/osc/osc_cl_internal.h for the Data-on-MDT project.
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
42 * Author: Mikhail Pershin <mike.pershin@intel.com>
48 #include <libcfs/libcfs.h>
50 #include <cl_object.h>
51 #include <lustre_crypto.h>
57 struct osc_quota_info {
58 /** linkage for quota hash table */
59 struct hlist_node oqi_hash;
64 ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
65 page is added to an rpc */
66 ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
67 ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
68 to give the caller a chance to update
69 or cancel the size of the io */
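/*
 * Example (added commentary, not in the original header): a page flagged with
 * OSC_FLAGS, defined later in this file as (ASYNC_URGENT|ASYNC_READY), must be
 * placed into an RPC before return and will not get an ap_make_ready() call
 * first - both meanings follow directly from the flag comments above.
 */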
73 struct osc_async_page {
74 unsigned short oap_cmd;
76 struct list_head oap_pending_item;
77 struct list_head oap_rpc_item;
80 unsigned oap_page_off;
81 enum async_flags oap_async_flags;
83 struct brw_page oap_brw_page;
85 struct ptlrpc_request *oap_request;
86 struct osc_object *oap_obj;
89 #define oap_page oap_brw_page.pg
90 #define oap_count oap_brw_page.count
91 #define oap_brw_flags oap_brw_page.flag
93 static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
95 return container_of(pga, struct osc_async_page, oap_brw_page);
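/*
 * Illustrative sketch (added commentary, not in the original header): since
 * struct brw_page is embedded in struct osc_async_page, the conversion is a
 * plain container_of() and the oap_* macros above alias the embedded fields:
 *
 *	struct osc_async_page *oap = brw_page2oap(pga);
 *
 *	LASSERT(&oap->oap_brw_page == pga);
 *	oap->oap_count = PAGE_SIZE;	// same as oap->oap_brw_page.count
 */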
99 struct cl_device od_cl;
100 struct obd_export *od_exp;
102 /* Write stats are actually protected by client_obd's lock. */
105 uint64_t os_lockless_writes; /* in bytes */
106 uint64_t os_lockless_reads; /* in bytes */
109 /* configuration item(s) */
110 time64_t od_contention_time;
116 * State maintained by osc layer for each IO context.
120 struct cl_io_slice oi_cl;
121 /** true if this io is lockless. */
122 unsigned int oi_lockless:1,
123 /** true if this io is counted as active IO */
125 /** true if this io has CAP_SYS_RESOURCE */
126 oi_cap_sys_resource:1,
127 /** true if this io was issued by readahead */
129 /** how many LRU pages are reserved for this IO */
130 unsigned long oi_lru_reserved;
132 /** active extents, we know how many bytes are going to be written,
133 * so having an active extent will prevent it from being fragmented */
134 struct osc_extent *oi_active;
135 /** partially truncated extent, we need to hold this extent to prevent
136 * page writeback from happening. */
137 struct osc_extent *oi_trunc;
138 /** write osc_lock for this IO, used by osc_extent_find(). */
139 struct osc_lock *oi_write_osclock;
140 struct osc_lock *oi_read_osclock;
142 struct osc_async_cbargs {
145 struct completion opc_sync;
150 * State maintained by osc layer for the duration of a system call.
156 #define OTI_PVEC_SIZE 256
157 struct osc_thread_info {
158 struct ldlm_res_id oti_resname;
159 union ldlm_policy_data oti_policy;
160 struct cl_attr oti_attr;
162 struct pagevec oti_pagevec;
163 void *oti_pvec[OTI_PVEC_SIZE];
165 * Fields used by cl_lock_discard_pages().
167 pgoff_t oti_next_index;
168 pgoff_t oti_fn_index; /* first non-overlapped index */
169 pgoff_t oti_ng_index; /* negative lock caching */
170 struct cl_sync_io oti_anchor;
171 struct cl_req_attr oti_req_attr;
172 struct lu_buf oti_ladvise_buf;
175 static inline __u64 osc_enq2ldlm_flags(__u32 enqflags)
179 CDEBUG(D_DLMTRACE, "flags: %x\n", enqflags);
181 LASSERT((enqflags & ~CEF_MASK) == 0);
183 if (enqflags & CEF_NONBLOCK)
184 result |= LDLM_FL_BLOCK_NOWAIT;
185 if (enqflags & CEF_GLIMPSE)
186 result |= LDLM_FL_HAS_INTENT|LDLM_FL_CBPENDING;
187 if (enqflags & CEF_DISCARD_DATA)
188 result |= LDLM_FL_AST_DISCARD_DATA;
189 if (enqflags & CEF_PEEK)
190 result |= LDLM_FL_TEST_LOCK;
191 if (enqflags & CEF_LOCK_MATCH)
192 result |= LDLM_FL_MATCH_LOCK;
193 if (enqflags & CEF_LOCK_NO_EXPAND)
194 result |= LDLM_FL_NO_EXPANSION;
195 if (enqflags & CEF_SPECULATIVE)
196 result |= LDLM_FL_SPECULATIVE;
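/*
 * Worked example (added commentary): enqueuing with
 * enqflags == (CEF_NONBLOCK | CEF_GLIMPSE) yields
 * LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT | LDLM_FL_CBPENDING,
 * following the translation above.
 */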
200 typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
203 struct osc_enqueue_args {
204 struct obd_export *oa_exp;
205 enum ldlm_type oa_type;
206 enum ldlm_mode oa_mode;
208 osc_enqueue_upcall_f oa_upcall;
210 struct ost_lvb *oa_lvb;
211 struct lustre_handle oa_lockh;
216 * Bit flags for osc_dlm_lock_at_pageoff().
220 * Just check if the desired lock exists; it won't hold a reference on the lock.
223 OSC_DAP_FL_TEST_LOCK = BIT(0),
225 * Return the lock even if it is being canceled.
227 OSC_DAP_FL_CANCELING = BIT(1),
229 * check ast data is present, requested to cancel cb
231 OSC_DAP_FL_AST = BIT(2),
233 * look at the region to the right for the desired lock
235 OSC_DAP_FL_RIGHT = BIT(3),
239 * The set of operations which are different for MDC and OSC objects
241 struct osc_object_operations {
242 void (*oto_build_res_name)(struct osc_object *osc,
243 struct ldlm_res_id *resname);
244 struct ldlm_lock* (*oto_dlmlock_at_pgoff)(const struct lu_env *env,
245 struct osc_object *obj,
247 enum osc_dap_flags dap_flags);
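/*
 * Minimal sketch (hypothetical names, added commentary): each layer supplies
 * its own methods through this table and points osc_object::oo_obj_ops at it,
 * which is how MDC and OSC differ in resource naming and per-pgoff DLM lock
 * lookup:
 *
 *	static const struct osc_object_operations foo_object_ops = {
 *		.oto_build_res_name	= foo_build_res_name,
 *		.oto_dlmlock_at_pgoff	= foo_dlmlock_at_pgoff,
 *	};
 */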
251 struct cl_object oo_cl;
252 struct lov_oinfo *oo_oinfo;
254 * True if locking against this stripe got -EUSERS.
257 ktime_t oo_contention_time;
258 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
260 * IO context used for invariant checks in osc_lock_has_pages().
262 struct cl_io oo_debug_io;
263 /** Serialization object for osc_object::oo_debug_io. */
264 struct mutex oo_debug_mutex;
267 * used by the osc to keep track of what objects to build into rpcs.
268 * Protected by client_obd->cli_loi_list_lock.
270 struct list_head oo_ready_item;
271 struct list_head oo_hp_ready_item;
272 struct list_head oo_write_item;
273 struct list_head oo_read_item;
276 * Extents are kept in a red-black tree to manage (async) dirty pages.
278 struct rb_root oo_root;
280 * Manage write(dirty) extents.
282 struct list_head oo_hp_exts; /* list of hp extents */
283 struct list_head oo_urgent_exts; /* list of writeback extents */
284 struct list_head oo_full_exts;
286 struct list_head oo_reading_exts;
288 atomic_t oo_nr_reads;
289 atomic_t oo_nr_writes;
291 /** Protect extent tree. Will be used to protect
292 * oo_{read|write}_pages soon. */
296 * Radix tree for caching pages
298 spinlock_t oo_tree_lock;
299 struct radix_tree_root oo_tree;
300 unsigned long oo_npages;
302 /* Protect osc_lock this osc_object has */
303 struct list_head oo_ol_list;
304 spinlock_t oo_ol_spin;
306 /** number of active IOs of this object */
308 wait_queue_head_t oo_io_waitq;
310 const struct osc_object_operations *oo_obj_ops;
314 static inline void osc_build_res_name(struct osc_object *osc,
315 struct ldlm_res_id *resname)
317 return osc->oo_obj_ops->oto_build_res_name(osc, resname);
320 static inline struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
321 struct osc_object *obj,
323 enum osc_dap_flags flags)
325 return obj->oo_obj_ops->oto_dlmlock_at_pgoff(env, obj, index, flags);
328 static inline void osc_object_lock(struct osc_object *obj)
330 spin_lock(&obj->oo_lock);
333 static inline int osc_object_trylock(struct osc_object *obj)
335 return spin_trylock(&obj->oo_lock);
338 static inline void osc_object_unlock(struct osc_object *obj)
340 spin_unlock(&obj->oo_lock);
343 #define assert_osc_object_is_locked(obj) \
344 assert_spin_locked(&obj->oo_lock)
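/*
 * Usage sketch (added commentary): the helpers above wrap osc_object::oo_lock;
 * a typical caller brackets a short critical section and may assert the lock
 * where a helper expects it held:
 *
 *	osc_object_lock(obj);
 *	assert_osc_object_is_locked(obj);
 *	// ... manipulate per-object state such as the extent tree ...
 *	osc_object_unlock(obj);
 */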
346 static inline void osc_object_set_contended(struct osc_object *obj)
348 obj->oo_contention_time = ktime_get();
350 obj->oo_contended = 1;
353 static inline void osc_object_clear_contended(struct osc_object *obj)
355 obj->oo_contended = 0;
359 * Lock "micro-states" for osc layer.
361 enum osc_lock_state {
370 * osc-private state of cl_lock.
372 * Interaction with DLM.
374 * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
375 * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
377 * This pointer is protected through a reference, acquired by
378 * osc_lock_upcall0(). Also, an additional reference is acquired by
379 * ldlm_lock_addref() call protecting the lock from cancellation, until
380 * osc_lock_unuse() releases it.
382 * Below is a description of how lock references are acquired and released
385 * - When a new lock is created and enqueued to the server (ldlm_cli_enqueue())
386 * - ldlm_lock_create()
387 * - ldlm_lock_new(): initializes a lock with 2 references. One for
388 * the caller (released when reply from the server is received, or on
389 * error), and another for the hash table.
390 * - ldlm_lock_addref_internal(): protects the lock from cancellation.
392 * - When reply is received from the server (osc_enqueue_interpret())
393 * - ldlm_cli_enqueue_fini()
394 * - LDLM_LOCK_PUT(): releases caller reference acquired by
397 * ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
398 * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
400 * - When lock is being cancelled (ldlm_lock_cancel())
401 * - ldlm_lock_destroy()
402 * - LDLM_LOCK_PUT(): releases hash-table reference acquired by
405 * osc_lock is detached from ldlm_lock by osc_lock_detach(), which is called
406 * either when the lock is cancelled (osc_lock_blocking()), or when the lock is
407 * deleted without cancellation (e.g., from cl_locks_prune()). In the latter
408 * case the ldlm lock remains in memory and can be re-attached to the osc_lock in the future.
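/*
 * Illustrative balance of the references described above (a summary of this
 * comment, added for clarity, not additional behaviour):
 *
 *	ldlm_lock_new():             +2  (caller + hash table)
 *	ldlm_lock_addref_internal(): +1  (protects from cancellation)
 *	reply received:              -1  (caller ref, LDLM_LOCK_PUT())
 *	osc_lock_unuse():            -1  (drops the addref protection)
 *	ldlm_lock_cancel():          -1  (hash table ref, LDLM_LOCK_PUT())
 */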
412 struct cl_lock_slice ols_cl;
413 /** Internal lock to protect states, etc. */
415 /** Owner sleeps on this channel for state change */
416 struct cl_sync_io *ols_owner;
417 /** waiting list for this lock to be cancelled */
418 struct list_head ols_waiting_list;
419 /** wait entry of ols_waiting_list */
420 struct list_head ols_wait_entry;
421 /** list entry for osc_object::oo_ol_list */
422 struct list_head ols_nextlock_oscobj;
424 /** underlying DLM lock */
425 struct ldlm_lock *ols_dlmlock;
426 /** DLM flags with which osc_lock::ols_lock was enqueued */
428 /** osc_lock::ols_lock handle */
429 struct lustre_handle ols_handle;
430 struct ldlm_enqueue_info ols_einfo;
431 enum osc_lock_state ols_state;
432 /** lock value block */
433 struct ost_lvb ols_lvb;
434 /** Lockless operations to be used by lockless lock */
435 const struct cl_lock_operations *ols_lockless_ops;
437 * true, if ldlm_lock_addref() was called against
438 * osc_lock::ols_lock. This is used for sanity checking.
440 * \see osc_lock::ols_has_ref
442 unsigned ols_hold :1,
444 * this is much like osc_lock::ols_hold, except that this bit is
445 * cleared _after_ the reference is released in osc_lock_unuse(). This
446 * fine distinction is needed because:
448 * - if ldlm lock still has a reference, osc_ast_data_get() needs
449 * to return associated cl_lock (so that a flag is needed that is
450 * cleared after ldlm_lock_decref() returned), and
452 * - ldlm_lock_decref() can invoke blocking ast (for a
453 * LDLM_FL_CBPENDING lock), and osc_lock functions like
454 * osc_lock_cancel() called from there need to know whether to
455 * release lock reference (so that a flag is needed that is
456 * cleared before ldlm_lock_decref() is called).
460 * inherit the lockless attribute from top level cl_io.
461 * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
465 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
466 * the EVAVAIL error as tolerable; this makes the upper layers happy
467 * to wait for all glimpse locks to all OSTs to complete.
468 * Glimpse lock converts to normal lock if the server lock is granted.
469 * Glimpse lock should be destroyed immediately after use.
473 * For async glimpse lock.
477 * for speculative locks - asynchronous glimpse locks and ladvise
478 * lockahead manual lock requests
480 * Used to tell osc layer to not wait for the ldlm reply from the
481 * server, so the osc lock will be short-lived: it only exists to
482 * create the ldlm request and is not updated on request completion.
487 static inline int osc_lock_is_lockless(const struct osc_lock *ols)
489 return (ols->ols_cl.cls_ops == ols->ols_lockless_ops);
493 * Page state private for osc layer.
496 struct cl_page_slice ops_cl;
498 * Page queues used by osc to detect when RPC can be formed.
500 struct osc_async_page ops_oap;
502 * An offset within page from which next transfer starts. This is used
503 * by cl_page_clip() to submit partial page transfers.
505 unsigned int ops_from:PAGE_SHIFT,
507 * An offset within page at which next transfer ends (inclusive).
509 * \see osc_page::ops_from.
513 * Boolean, true iff page is under transfer. Used for sanity checking.
515 ops_transfer_pinned:1,
521 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
525 * If the page is in osc_object::oo_tree.
529 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
531 struct list_head ops_lru;
534 struct osc_brw_async_args {
536 int aa_requested_nob;
540 struct brw_page **aa_ppga;
541 struct client_obd *aa_cli;
542 struct list_head aa_oaps;
543 struct list_head aa_exts;
546 extern struct kmem_cache *osc_lock_kmem;
547 extern struct kmem_cache *osc_object_kmem;
548 extern struct kmem_cache *osc_thread_kmem;
549 extern struct kmem_cache *osc_session_kmem;
550 extern struct kmem_cache *osc_extent_kmem;
551 extern struct kmem_cache *osc_quota_kmem;
552 extern struct kmem_cache *osc_obdo_kmem;
554 extern struct lu_context_key osc_key;
555 extern struct lu_context_key osc_session_key;
557 #define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
560 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
561 struct cl_page *page, pgoff_t ind);
562 void osc_index2policy(union ldlm_policy_data *policy, const struct cl_object *obj,
563 pgoff_t start, pgoff_t end);
564 void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
565 void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
566 enum cl_req_type crt, int brw_flags);
567 int lru_queue_work(const struct lu_env *env, void *data);
568 long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
569 long target, bool force);
572 int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
574 int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
575 struct cl_page *page, loff_t offset);
576 int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
577 struct osc_page *ops, cl_commit_cbt cb);
578 int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
579 struct cl_io *io, cl_commit_cbt cb);
580 int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
581 struct osc_page *ops);
582 int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
583 struct osc_page *ops);
584 int osc_queue_sync_pages(const struct lu_env *env, struct cl_io *io,
585 struct osc_object *obj, struct list_head *list,
587 int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
588 __u64 size, struct osc_extent **extp);
589 void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
590 int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
591 pgoff_t start, pgoff_t end, int hp, int discard);
592 int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
593 pgoff_t start, pgoff_t end);
594 int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
595 struct osc_object *osc, int async);
596 static inline void osc_wake_cache_waiters(struct client_obd *cli)
598 wake_up(&cli->cl_cache_waiters);
601 static inline int osc_io_unplug_async(const struct lu_env *env,
602 struct client_obd *cli,
603 struct osc_object *osc)
605 return osc_io_unplug0(env, cli, osc, 1);
608 static inline void osc_io_unplug(const struct lu_env *env,
609 struct client_obd *cli,
610 struct osc_object *osc)
612 (void)osc_io_unplug0(env, cli, osc, 0);
615 typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
616 void**, int, void *);
617 bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
618 struct osc_object *osc, pgoff_t start, pgoff_t end,
619 osc_page_gang_cbt cb, void *cbdata);
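/*
 * Hedged sketch of a gang-lookup callback (hypothetical example; compare the
 * declared osc_discard_cb() below). It is assumed here that the pvec entries
 * handed to the callback are osc_page pointers batched by
 * osc_page_gang_lookup() and that returning true continues the walk:
 *
 *	static bool foo_page_cb(const struct lu_env *env, struct cl_io *io,
 *				void **pvec, int count, void *cbdata)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			struct osc_page *ops = pvec[i];
 *
 *			// inspect or act on ops, e.g. via osc_index(ops)
 *		}
 *		return true;
 *	}
 */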
620 bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
621 void**, int, void *cbdata);
624 int osc_device_init(const struct lu_env *env, struct lu_device *d,
625 const char *name, struct lu_device *next);
626 struct lu_device *osc_device_fini(const struct lu_env *env,
627 struct lu_device *d);
628 struct lu_device *osc_device_free(const struct lu_env *env,
629 struct lu_device *d);
632 int osc_object_init(const struct lu_env *env, struct lu_object *obj,
633 const struct lu_object_conf *conf);
634 void osc_object_free(const struct lu_env *env, struct lu_object *obj);
635 int osc_lvb_print(const struct lu_env *env, void *cookie,
636 lu_printer_t p, const struct ost_lvb *lvb);
637 int osc_object_print(const struct lu_env *env, void *cookie,
638 lu_printer_t p, const struct lu_object *obj);
639 int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
640 struct cl_attr *attr);
641 int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
642 const struct cl_attr *attr, unsigned valid);
643 int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
644 struct ost_lvb *lvb);
645 int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
646 int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
647 ldlm_iterator_t iter, void *data);
648 int osc_object_prune(const struct lu_env *env, struct cl_object *obj);
651 void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd);
652 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg);
653 int osc_precleanup_common(struct obd_device *obd);
654 int osc_cleanup_common(struct obd_device *obd);
655 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
656 u32 keylen, void *key, u32 vallen, void *val,
657 struct ptlrpc_request_set *set);
658 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
659 struct hlist_node *hnode, void *arg);
660 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
661 struct obd_device *obd, struct obd_uuid *cluuid,
662 struct obd_connect_data *data, void *localdata);
663 int osc_disconnect(struct obd_export *exp);
664 int osc_punch_send(struct obd_export *exp, struct obdo *oa,
665 obd_enqueue_update_f upcall, void *cookie);
666 int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
667 obd_enqueue_update_f upcall, void *cookie, int mode);
668 void osc_update_next_shrink(struct client_obd *cli);
669 void osc_schedule_grant_work(void);
672 int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
673 enum cl_req_type crt, struct cl_2queue *queue);
674 int osc_io_commit_async(const struct lu_env *env,
675 const struct cl_io_slice *ios,
676 struct cl_page_list *qin, int from, int to,
678 void osc_io_extent_release(const struct lu_env *env,
679 const struct cl_io_slice *ios);
680 int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
681 void osc_io_iter_fini(const struct lu_env *env,
682 const struct cl_io_slice *ios);
683 void osc_io_rw_iter_fini(const struct lu_env *env,
684 const struct cl_io_slice *ios);
685 int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
686 void osc_io_setattr_end(const struct lu_env *env,
687 const struct cl_io_slice *slice);
688 int osc_io_read_start(const struct lu_env *env,
689 const struct cl_io_slice *slice);
690 int osc_io_write_start(const struct lu_env *env,
691 const struct cl_io_slice *slice);
692 void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice);
693 int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
694 struct cl_fsync_io *fio);
695 void osc_io_fsync_end(const struct lu_env *env,
696 const struct cl_io_slice *slice);
697 void osc_read_ahead_release(const struct lu_env *env, struct cl_read_ahead *ra);
698 int osc_io_lseek_start(const struct lu_env *env,
699 const struct cl_io_slice *slice);
700 void osc_io_lseek_end(const struct lu_env *env,
701 const struct cl_io_slice *slice);
702 int osc_io_lru_reserve(const struct lu_env *env, const struct cl_io_slice *ios,
703 loff_t pos, size_t count);
704 int osc_punch_start(const struct lu_env *env, struct cl_io *io,
705 struct cl_object *obj);
708 void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,
710 void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
711 struct osc_lock *oscl);
712 int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
713 struct osc_lock *oscl);
714 void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
715 struct cl_object *obj, struct osc_lock *oscl);
716 void osc_lock_set_reader(const struct lu_env *env, const struct cl_io *io,
717 struct cl_object *obj, struct osc_lock *oscl);
718 int osc_lock_print(const struct lu_env *env, void *cookie,
719 lu_printer_t p, const struct cl_lock_slice *slice);
720 void osc_lock_cancel(const struct lu_env *env,
721 const struct cl_lock_slice *slice);
722 void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
723 int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
724 unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
726 /*****************************************************************************
728 * Accessors and type conversions.
731 static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
733 struct osc_thread_info *info;
735 info = lu_context_key_get(&env->le_ctx, &osc_key);
736 LASSERT(info != NULL);
740 static inline struct osc_session *osc_env_session(const struct lu_env *env)
742 struct osc_session *ses;
744 ses = lu_context_key_get(env->le_ses, &osc_session_key);
745 LASSERT(ses != NULL);
749 static inline struct osc_io *osc_env_io(const struct lu_env *env)
751 return &osc_env_session(env)->os_io;
754 static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
756 return container_of_safe(d, struct osc_device, od_cl.cd_lu_dev);
759 static inline struct obd_export *osc_export(const struct osc_object *obj)
761 return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
764 static inline struct client_obd *osc_cli(const struct osc_object *obj)
766 return &osc_export(obj)->exp_obd->u.cli;
769 static inline struct osc_object *cl2osc(const struct cl_object *obj)
771 return container_of_safe(obj, struct osc_object, oo_cl);
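/*
 * Illustrative chain (added commentary, derived from the helpers above): the
 * client state behind a cl_object is reached by composing these accessors:
 *
 *	struct osc_object *osc = cl2osc(obj);
 *	struct obd_export *exp = osc_export(osc);
 *	struct client_obd *cli = osc_cli(osc);
 */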
774 static inline struct cl_object *osc2cl(const struct osc_object *obj)
776 return (struct cl_object *)&obj->oo_cl;
779 static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
781 return container_of_safe(obd->obd_lu_dev, struct osc_device,
785 static inline struct lu_device *osc2lu_dev(struct osc_device *osc)
787 return &osc->od_cl.cd_lu_dev;
790 static inline struct lu_object *osc2lu(struct osc_object *osc)
792 return &osc->oo_cl.co_lu;
795 static inline struct osc_object *lu2osc(const struct lu_object *obj)
797 return container_of_safe(obj, struct osc_object, oo_cl.co_lu);
800 static inline struct osc_io *cl2osc_io(const struct lu_env *env,
801 const struct cl_io_slice *slice)
803 struct osc_io *oio = container_of(slice, struct osc_io, oi_cl);
805 LINVRNT(oio == osc_env_io(env));
809 static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
811 LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
812 if (mode == CLM_READ)
814 if (mode == CLM_WRITE)
819 static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
821 LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
829 static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
831 return container_of_safe(slice, struct osc_page, ops_cl);
834 static inline struct osc_page *oap2osc(struct osc_async_page *oap)
836 return container_of_safe(oap, struct osc_page, ops_oap);
839 static inline pgoff_t osc_index(struct osc_page *opg)
841 return opg->ops_oap.oap_obj_off >> PAGE_SHIFT;
844 static inline struct osc_object *osc_page_object(struct osc_page *ops)
846 return ops->ops_oap.oap_obj;
849 static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
851 return oap2osc(oap)->ops_cl.cpl_page;
854 static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
856 return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
859 static inline struct osc_page *
860 osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
862 const struct cl_page_slice *slice;
864 LASSERT(osc != NULL);
865 slice = cl_object_page_slice(&osc->oo_cl, page);
866 return cl2osc_page(slice);
869 static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
871 return container_of_safe(slice, struct osc_lock, ols_cl);
874 static inline int osc_io_srvlock(struct osc_io *oio)
876 return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
879 enum osc_extent_state {
880 OES_INV = 0, /** extent is just initialized or destroyed */
881 OES_ACTIVE = 1, /** process is using this extent */
882 OES_CACHE = 2, /** extent is ready for IO */
883 OES_LOCKING = 3, /** locking page to prepare IO */
884 OES_LOCK_DONE = 4, /** locking finished, ready to send */
885 OES_RPC = 5, /** in RPC */
886 OES_TRUNC = 6, /** being truncated */
891 * osc_extent data to manage dirty pages.
892 * osc_extent has the following attributes:
893 * 1. all pages in the same extent must be in one RPC in write back;
894 * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
895 * 3. must be covered by only 1 osc_lock;
896 * 4. exclusive. It's impossible to have overlapping osc_extents (see the worked example below).
898 * The lifetime of an extent is from when the 1st page is dirtied to when
899 * all pages inside it are written out.
903 * page lock -> client_obd_list_lock -> object lock (osc_object::oo_lock)
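/*
 * Worked illustration of the rules above (added commentary; the numbers are
 * only an example): with 4KiB pages and max_pages_per_rpc = 256, an extent
 * never covers more than 1MiB of dirty data (rule 2), which is sent as a
 * single write RPC (rule 1) under one covering osc_lock (rule 3), and no
 * other extent of the object overlaps its page range (rule 4).
 */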
906 /** red-black tree node */
907 struct rb_node oe_node;
908 /** osc_object of this extent */
909 struct osc_object *oe_obj;
910 /** refcount, removed from red-black tree if reaches zero. */
912 /** busy if non-zero */
914 /** link list of osc_object's oo_{hp|urgent|locking}_exts. */
915 struct list_head oe_link;
916 /** state of this extent */
917 enum osc_extent_state oe_state;
918 /** flags for this extent. */
919 /** 0 is write, 1 is read */
920 unsigned int oe_rw:1,
921 /** sync extent, queued by osc_queue_sync_pages() */
923 /** set if this extent has partial, sync pages.
924 * Extents with partial page(s) cannot be merged with others into one RPC. */
928 /** an ACTIVE extent is going to be truncated, so when this extent
929 * is released, it will turn into TRUNC state instead of CACHE. */
931 /** this extent should be written asap and someone may wait for the
932 * write to finish. This bit is usually set along with urgent if
933 * the extent was in CACHE state.
934 * An fsync_wait extent can't be merged because the new extent region may
935 * exceed the fsync range. */
937 /** covering lock is being canceled */
939 /** this extent should be written back asap. Set if one of its pages was
940 * picked up by the page WB daemon, or by a sync write or read request. */
942 /** Non-delay RPC should be used for this extent. */
944 /** direct IO pages */
946 /** this extent consists of pages that are not directly accessible
949 /** how many grants are allocated for this extent.
950 * There is no grant allocated for reading extents and
951 * sync write extents. */
952 unsigned int oe_grants;
953 /** # of dirty pages in this extent */
954 unsigned int oe_nr_pages;
955 /** list of pending oap pages. Pages in this list are NOT sorted. */
956 struct list_head oe_pages;
957 /** start and end index of this extent, including start and end
958 * themselves. Page offset here is the page index of osc_pages.
959 * oe_start is used as the key for the red-black tree. */
962 /** maximum ending index of this extent, this is limited by
963 * max_pages_per_rpc, lock extent and chunk size. */
965 /** waitqueue - for those who want to be notified if this extent's
966 * state has changed. */
967 wait_queue_head_t oe_waitq;
968 /** lock covering this extent */
969 struct ldlm_lock *oe_dlmlock;
970 /** terminator of this extent. Must be set if this extent is in IO. */
971 struct task_struct *oe_owner;
972 /** return value of writeback. If somebody is waiting for this extent,
973 * this value is visible to the outside world. */
975 /** max pages per rpc when this extent was created */
976 unsigned int oe_mppr;
977 /** FLR: layout version when this osc_extent is published */
978 __u32 oe_layout_version;
983 #endif /* LUSTRE_OSC_H */