/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/include/lustre_osc.h
 *
 * OSC layer structures and methods common for both OSC and MDC.
 *
 * This file contains OSC interfaces used by OSC and MDC. Most of them
 * were just moved from lustre/osc/osc_cl_internal.h for Data-on-MDT
 * purposes.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#ifndef LUSTRE_OSC_H
#define LUSTRE_OSC_H

#include <libcfs/libcfs.h>
#include <obd.h>
#include <cl_object.h>
#include <lustre_crypto.h>

enum oap_async_flags {
	/* ap_make_ready will not be called before page is added to an RPC */
	ASYNC_READY = 0x1,
	/* page must be put into RPC before return */
	ASYNC_URGENT = 0x2,
	/* ap_refresh_count will not be called to give the caller a chance to
	 * update or cancel the size of the io
	 */
	ASYNC_COUNT_STABLE = 0x4,
	ASYNC_HP = 0x8,
	OAP_ASYNC_MAX,
	OAP_ASYNC_BITS = 4
};

/* add explicit padding to keep fields aligned despite "packed",
 * which is needed to pack with the following field in osc_page
 */
#define OAP_PAD_BITS (16 - OBD_BRW_WRITE - OAP_ASYNC_BITS)

struct osc_async_page {
	unsigned short		oap_page_off /* :PAGE_SHIFT */;
	unsigned int		oap_cmd:OBD_BRW_WRITE;
	enum oap_async_flags	oap_async_flags:OAP_ASYNC_BITS;
	unsigned int		oap_padding1:OAP_PAD_BITS;	/* unused */
	unsigned int		oap_padding2;			/* unused */

	struct list_head	oap_pending_item;
	struct list_head	oap_rpc_item;

	loff_t			oap_obj_off;

	struct ptlrpc_request	*oap_request;
	struct osc_object	*oap_obj;

	struct brw_page		oap_brw_page;
} __attribute__((packed));

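/*
 * A compile-time sketch of the packing invariant above (hypothetical
 * helper, shown for illustration only): the three bitfields following
 * oap_page_off are padded out to exactly one 16-bit storage unit, so
 * "packed" cannot misalign the members that follow them.
 */
static inline void osc_async_page_pad_check(void)
{
	BUILD_BUG_ON(OBD_BRW_WRITE + OAP_ASYNC_BITS + OAP_PAD_BITS != 16);
}
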
#define oap_page	oap_brw_page.bp_page
#define oap_count	oap_brw_page.bp_count
#define oap_brw_flags	oap_brw_page.bp_flag

static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
{
	BUILD_BUG_ON(OAP_ASYNC_MAX - 1 >= (1 << OAP_ASYNC_BITS));
	return container_of(pga, struct osc_async_page, oap_brw_page);
}

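/*
 * Usage sketch (hypothetical helper): the oap_* macros alias fields of
 * the embedded brw_page, and brw_page2oap() inverts the embedding, so a
 * brw_page handed to the RPC layer can be mapped back to its owning
 * object.
 */
static inline struct osc_object *osc_example_brw_page2obj(struct brw_page *pga)
{
	return brw_page2oap(pga)->oap_obj;
}
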
struct osc_device {
	struct cl_device	osc_cl;
	struct obd_export	*osc_exp;

	/* Write stats are actually protected by client_obd's lock. */
	struct osc_stats {
		uint64_t	os_lockless_writes;	/* by bytes */
		uint64_t	os_lockless_reads;	/* by bytes */
	} osc_stats;

	/* configuration item(s) */
	time64_t		osc_contention_time;
};

/**
 * State maintained by osc layer for each IO context.
 */
struct osc_io {
	struct cl_io_slice	oi_cl;
	/** true if this io is lockless. */
	unsigned int		oi_lockless:1,
	/** true if this io is counted as active IO */
				oi_is_active:1,
	/** true if this io has CAP_SYS_RESOURCE */
				oi_cap_sys_resource:1,
	/** true if this io was issued by readahead */
				oi_is_readahead:1;
	/** how many LRU pages are reserved for this IO */
	unsigned long		oi_lru_reserved;

	/** active extents: we know how many bytes are going to be written,
	 * so having an active extent will prevent it from being fragmented
	 */
	struct osc_extent	*oi_active;
	/** partially truncated extent, we need to hold this extent to prevent
	 * page writeback from happening.
	 */
	struct osc_extent	*oi_trunc;
	/** write osc_lock for this IO, used by osc_extent_find(). */
	struct osc_lock		*oi_write_osclock;
	struct osc_lock		*oi_read_osclock;

	struct osc_async_cbargs {
		bool			opc_rpc_sent;
		int			opc_rc;
		struct completion	opc_sync;
	} oi_cbarg;
};

/**
 * State maintained by osc layer for the duration of a system call.
 */
struct osc_session {
	struct osc_io os_io;
};

#define OTI_PVEC_SIZE 256

struct osc_thread_info {
	struct ldlm_res_id	oti_resname;
	union ldlm_policy_data	oti_policy;
	struct cl_attr		oti_attr;
	struct pagevec		oti_pagevec;
	void			*oti_pvec[OTI_PVEC_SIZE];
	/**
	 * Fields used by cl_lock_discard_pages().
	 */
	pgoff_t			oti_next_index;
	pgoff_t			oti_fn_index;	/* first non-overlapped index */
	pgoff_t			oti_ng_index;	/* negative lock caching */
	struct cl_sync_io	oti_anchor;
	struct cl_req_attr	oti_req_attr;
	struct lu_buf		oti_ladvise_buf;
};

static inline __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
	__u64 result = 0;

	CDEBUG(D_DLMTRACE, "flags: %x\n", enqflags);

	LASSERT((enqflags & ~CEF_MASK) == 0);

	if (enqflags & CEF_NONBLOCK)
		result |= LDLM_FL_BLOCK_NOWAIT;
	if (enqflags & CEF_GLIMPSE)
		result |= LDLM_FL_HAS_INTENT | LDLM_FL_CBPENDING;
	if (enqflags & CEF_DISCARD_DATA)
		result |= LDLM_FL_AST_DISCARD_DATA;
	if (enqflags & CEF_PEEK)
		result |= LDLM_FL_TEST_LOCK;
	if (enqflags & CEF_LOCK_MATCH)
		result |= LDLM_FL_MATCH_LOCK;
	if (enqflags & CEF_LOCK_NO_EXPAND)
		result |= LDLM_FL_NO_EXPANSION;
	if (enqflags & CEF_SPECULATIVE)
		result |= LDLM_FL_SPECULATIVE;
	return result;
}

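/*
 * Usage sketch (hypothetical helper): a non-blocking glimpse enqueue
 * converts to LDLM_FL_HAS_INTENT | LDLM_FL_CBPENDING |
 * LDLM_FL_BLOCK_NOWAIT.
 */
static inline __u64 osc_example_glimpse_flags(void)
{
	return osc_enq2ldlm_flags(CEF_GLIMPSE | CEF_NONBLOCK);
}
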
typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
				    int rc);

struct osc_enqueue_args {
	struct obd_export	*oa_exp;
	enum ldlm_type		oa_type;
	enum ldlm_mode		oa_mode;
	__u64			*oa_flags;
	osc_enqueue_upcall_f	oa_upcall;
	void			*oa_cookie;
	struct ost_lvb		*oa_lvb;
	struct lustre_handle	oa_lockh;
	bool			oa_speculative;
};

/**
 * Bit flags for osc_dlmlock_at_pgoff().
 */
enum osc_dap_flags {
	/**
	 * Just check if the desired lock exists; it won't hold a reference
	 * count on the lock.
	 */
	OSC_DAP_FL_TEST_LOCK = BIT(0),
	/**
	 * Return the lock even if it is being canceled.
	 */
	OSC_DAP_FL_CANCELING = BIT(1),
	/**
	 * check that ast data is present, requested to cancel cb
	 */
	OSC_DAP_FL_AST = BIT(2),
	/**
	 * look at the right region for the desired lock
	 */
	OSC_DAP_FL_RIGHT = BIT(3),
};

/**
 * The set of operations which are different for MDC and OSC objects.
 */
struct osc_object_operations {
	void (*oto_build_res_name)(struct osc_object *osc,
				   struct ldlm_res_id *resname);
	struct ldlm_lock *(*oto_dlmlock_at_pgoff)(const struct lu_env *env,
						  struct osc_object *obj,
						  pgoff_t index,
						  enum osc_dap_flags dap_flags);
};

struct osc_object {
	struct cl_object	oo_cl;
	struct lov_oinfo	*oo_oinfo;
	/**
	 * True if locking against this stripe got -EUSERS.
	 */
	int			oo_contended;
	ktime_t			oo_contention_time;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
	/**
	 * IO context used for invariant checks in osc_lock_has_pages().
	 */
	struct cl_io		oo_debug_io;
	/** Serialization object for osc_object::oo_debug_io. */
	struct mutex		oo_debug_mutex;
#endif
	/**
	 * used by the osc to keep track of what objects to build into RPCs.
	 * Protected by client_obd->cli_loi_list_lock.
	 */
	struct list_head	oo_ready_item;
	struct list_head	oo_hp_ready_item;
	struct list_head	oo_write_item;
	struct list_head	oo_read_item;

	/**
	 * extent is a red-black tree to manage (async) dirty pages.
	 */
	struct rb_root		oo_root;
	/**
	 * Manage write (dirty) extents.
	 */
	struct list_head	oo_hp_exts;	/* list of hp extents */
	struct list_head	oo_urgent_exts;	/* list of writeback extents */
	struct list_head	oo_full_exts;

	struct list_head	oo_reading_exts;

	atomic_t		oo_nr_reads;
	atomic_t		oo_nr_writes;

	/** Protects the extent tree; will be used to protect
	 * oo_{read|write}_pages soon.
	 */
	spinlock_t		oo_lock;

	/**
	 * Radix tree for caching pages
	 */
	spinlock_t		oo_tree_lock;
	struct radix_tree_root	oo_tree;
	unsigned long		oo_npages;

	/* Protect the osc_lock list this osc_object has */
	struct list_head	oo_ol_list;
	spinlock_t		oo_ol_spin;

	/** number of active IOs of this object */
	atomic_t		oo_nr_ios;
	wait_queue_head_t	oo_io_waitq;

	const struct osc_object_operations *oo_obj_ops;
};

static inline void osc_build_res_name(struct osc_object *osc,
				      struct ldlm_res_id *resname)
{
	osc->oo_obj_ops->oto_build_res_name(osc, resname);
}

static inline struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
						     struct osc_object *obj,
						     pgoff_t index,
						     enum osc_dap_flags flags)
{
	return obj->oo_obj_ops->oto_dlmlock_at_pgoff(env, obj, index, flags);
}

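/*
 * Usage sketch (hypothetical helper): the dap flags combine bitwise,
 * e.g. to peek at a lock without taking a reference, even if the lock
 * is already being canceled.
 */
static inline struct ldlm_lock *
osc_example_peek_dlmlock(const struct lu_env *env, struct osc_object *obj,
			 pgoff_t index)
{
	return osc_dlmlock_at_pgoff(env, obj, index,
				    OSC_DAP_FL_TEST_LOCK |
				    OSC_DAP_FL_CANCELING);
}
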
static inline void osc_object_lock(struct osc_object *obj)
{
	spin_lock(&obj->oo_lock);
}

static inline int osc_object_trylock(struct osc_object *obj)
{
	return spin_trylock(&obj->oo_lock);
}

static inline void osc_object_unlock(struct osc_object *obj)
{
	spin_unlock(&obj->oo_lock);
}

#define assert_osc_object_is_locked(obj)	\
	assert_spin_locked(&obj->oo_lock)

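/*
 * Usage sketch (hypothetical caller) for the wrappers above: take the
 * object lock opportunistically and back off when it is contended.
 */
static inline bool osc_example_try_locked_op(struct osc_object *obj)
{
	if (!osc_object_trylock(obj))
		return false;
	assert_osc_object_is_locked(obj);
	/* ... inspect or update the extent tree here ... */
	osc_object_unlock(obj);
	return true;
}
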
static inline void osc_object_set_contended(struct osc_object *obj)
{
	obj->oo_contention_time = ktime_get();
	obj->oo_contended = 1;
}

static inline void osc_object_clear_contended(struct osc_object *obj)
{
	obj->oo_contended = 0;
}

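/*
 * Sketch of how the contention stamp might be consumed (hypothetical
 * helper; the real policy lives in the osc code and is driven by
 * osc_device::osc_contention_time): the object counts as contended only
 * within a configured window after the last -EUSERS.
 */
static inline bool osc_example_is_contended(struct osc_object *obj,
					    time64_t window_sec)
{
	if (!obj->oo_contended)
		return false;
	return ktime_before(ktime_get(),
			    ktime_add(obj->oo_contention_time,
				      ktime_set(window_sec, 0)));
}
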
/**
 * Lock "micro-states" for osc layer.
 */
enum osc_lock_state {
	OLS_NEW,
	OLS_ENQUEUED,
	OLS_UPCALL_RECEIVED,
	OLS_GRANTED,
	OLS_CANCELLED
};

/**
 * osc-private state of cl_lock.
 *
 * Interaction with DLM.
 *
 * Once the receive upcall is invoked, osc_lock remembers the handle of the
 * DLM lock in osc_lock::ols_handle and a pointer to that lock in
 * osc_lock::ols_dlmlock.
 *
 * This pointer is protected through a reference, acquired by
 * osc_lock_upcall0(). Also, an additional reference is acquired by
 * ldlm_lock_addref() call protecting the lock from cancellation, until
 * osc_lock_unuse() releases it.
 *
 * Below is a description of how lock references are acquired and released
 * inside of DLM.
 *
 * - When a new lock is created and enqueued to the server (ldlm_cli_enqueue())
 *      - ldlm_lock_create()
 *          - ldlm_lock_new(): initializes a lock with 2 references. One for
 *            the caller (released when reply from the server is received, or
 *            on error), and another for the hash table.
 *      - ldlm_lock_addref_internal(): protects the lock from cancellation.
 *
 * - When a reply is received from the server (osc_enqueue_interpret())
 *      - ldlm_cli_enqueue_fini()
 *          - LDLM_LOCK_PUT(): releases the caller reference acquired by
 *            ldlm_lock_new().
 *          - if (rc != 0)
 *                ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
 *      - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
 *
 * - When the lock is being cancelled (ldlm_lock_cancel())
 *      - ldlm_lock_destroy()
 *          - LDLM_LOCK_PUT(): releases the hash-table reference acquired by
 *            ldlm_lock_new().
 *
 * osc_lock is detached from ldlm_lock by osc_lock_detach(), which is called
 * either when the lock is cancelled (osc_lock_blocking()), or when the lock
 * is deleted without cancellation (e.g., from cl_locks_prune()). In the
 * latter case the ldlm lock remains in memory, and can be re-attached to
 * osc_lock in the future, if the lock is enqueued again.
 */
struct osc_lock {
	struct cl_lock_slice	ols_cl;
	/** Internal lock to protect states, etc. */
	spinlock_t		ols_lock;
	/** Owner sleeps on this channel for state change */
	struct cl_sync_io	*ols_owner;
	/** waiting list for this lock to be cancelled */
	struct list_head	ols_waiting_list;
	/** wait entry of ols_waiting_list */
	struct list_head	ols_wait_entry;
	/** list entry for osc_object::oo_ol_list */
	struct list_head	ols_nextlock_oscobj;

	/** underlying DLM lock */
	struct ldlm_lock	*ols_dlmlock;
	/** DLM flags with which osc_lock::ols_lock was enqueued */
	__u64			ols_flags;
	/** osc_lock::ols_lock handle */
	struct lustre_handle	ols_handle;
	struct ldlm_enqueue_info ols_einfo;
	enum osc_lock_state	ols_state;
	/** lock value block */
	struct ost_lvb		ols_lvb;
	/** Lockless operations to be used by lockless lock */
	const struct cl_lock_operations *ols_lockless_ops;
	/**
	 * true, if ldlm_lock_addref() was called against
	 * osc_lock::ols_lock. This is used for sanity checking.
	 *
	 * \see osc_lock::ols_has_ref
	 */
	unsigned		ols_hold :1,
	/**
	 * this is much like osc_lock::ols_hold, except that this bit is
	 * cleared _after_ the reference is released in osc_lock_unuse(). This
	 * fine distinction is needed because:
	 *
	 * - if the ldlm lock still has a reference, osc_ast_data_get() needs
	 *   to return the associated cl_lock (so a flag is needed that is
	 *   cleared after ldlm_lock_decref() returns), and
	 *
	 * - ldlm_lock_decref() can invoke a blocking ast (for a
	 *   LDLM_FL_CBPENDING lock), and osc_lock functions like
	 *   osc_lock_cancel() called from there need to know whether to
	 *   release the lock reference (so a flag is needed that is
	 *   cleared before ldlm_lock_decref() is called).
	 */
				ols_has_ref:1,
	/**
	 * inherit the lockless attribute from top level cl_io.
	 * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
	 */
				ols_locklessable:1,
	/**
	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
	 * the EVAVAIL error as tolerable; this makes the upper logic happy
	 * to wait for all glimpse locks to all OSTs to be completed.
	 * A glimpse lock converts to a normal lock if the server lock is
	 * granted. A glimpse lock should be destroyed immediately after use.
	 */
				ols_glimpse:1,
	/**
	 * For async glimpse lock.
	 */
				ols_agl:1,
	/**
	 * for speculative locks - asynchronous glimpse locks and ladvise
	 * lockahead manual lock requests
	 *
	 * Used to tell the osc layer to not wait for the ldlm reply from the
	 * server, so the osc lock will be short lived - it only exists to
	 * create the ldlm request and is not updated on request completion.
	 */
				ols_speculative:1;
};

static inline int osc_lock_is_lockless(const struct osc_lock *ols)
{
	return (ols->ols_cl.cls_ops == ols->ols_lockless_ops);
}

/**
 * Page state private for osc layer.
 */
struct osc_page {
	struct cl_page_slice	ops_cl;
	/**
	 * Page queues used by osc to detect when RPC can be formed.
	 */
	struct osc_async_page	ops_oap;
	/**
	 * An offset within page from which next transfer starts. This is used
	 * by cl_page_clip() to submit partial page transfers.
	 */
	unsigned int		ops_from:PAGE_SHIFT,
	/**
	 * An offset within page at which next transfer ends (inclusive).
	 *
	 * \see osc_page::ops_from.
	 */
				ops_to:PAGE_SHIFT,
	/**
	 * Boolean, true iff page is under transfer. Used for sanity checking.
	 */
				ops_transfer_pinned:1,
	/**
	 * in LRU?
	 */
				ops_in_lru:1,
	/**
	 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
	 */
				ops_srvlock:1,
	/**
	 * If the page is in osc_object::oo_tree.
	 */
				ops_intree:1;
	/**
	 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
	 */
	struct list_head	ops_lru;
};

struct osc_brw_async_args {
	struct obdo		*aa_oa;
	int			 aa_requested_nob;
	struct brw_page		**aa_ppga;
	struct client_obd	*aa_cli;
	struct list_head	 aa_oaps;
	struct list_head	 aa_exts;
};

extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct kmem_cache *osc_obdo_kmem;

extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;

#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)

/* osc_page.c */
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t ind);
void osc_index2policy(union ldlm_policy_data *policy,
		      const struct cl_object *obj, pgoff_t start, pgoff_t end);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags);
int lru_queue_work(const struct lu_env *env, void *data);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
		    long target, bool force);

/* osc_cache.c */
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
			u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
			struct cl_page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
		       struct osc_page *ops, cl_commit_cbt cb);
int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
		       struct cl_io *io, cl_commit_cbt cb);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
			    struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
			 struct osc_page *ops);
int osc_queue_sync_pages(const struct lu_env *env, struct cl_io *io,
			 struct osc_object *obj, struct list_head *list,
			 int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
			     __u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
			      pgoff_t start, pgoff_t end, int hp, int discard);
int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
			 pgoff_t start, pgoff_t end);
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
		   struct osc_object *osc, int async);

static inline void osc_wake_cache_waiters(struct client_obd *cli)
{
	wake_up(&cli->cl_cache_waiters);
}

static inline int osc_io_unplug_async(const struct lu_env *env,
				      struct client_obd *cli,
				      struct osc_object *osc)
{
	return osc_io_unplug0(env, cli, osc, 1);
}

static inline void osc_io_unplug(const struct lu_env *env,
				 struct client_obd *cli,
				 struct osc_object *osc)
{
	(void)osc_io_unplug0(env, cli, osc, 0);
}

typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
				  void **, int, void *);
bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
			  struct osc_object *osc, pgoff_t start, pgoff_t end,
			  osc_page_gang_cbt cb, void *cbdata);
bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
		    void **pvec, int count, void *cbdata);

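/*
 * Sketch of an osc_page_gang_cbt callback (hypothetical): count the
 * pages visited. Returning true is assumed here to continue the
 * iteration, while false stops the walk early.
 */
static inline bool osc_example_count_pages_cb(const struct lu_env *env,
					      struct cl_io *io, void **pvec,
					      int count, void *cbdata)
{
	int *npages = cbdata;

	*npages += count;
	return true;
}
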
/* osc_dev.c */
int osc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *osc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *osc_device_free(const struct lu_env *env,
				  struct lu_device *d);

/* osc_object.c */
int osc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void osc_object_free(const struct lu_env *env, struct lu_object *obj);
int osc_lvb_print(const struct lu_env *env, void *cookie,
		  lu_printer_t p, const struct ost_lvb *lvb);
int osc_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t p, const struct lu_object *obj);
int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
		 struct cl_attr *attr);
int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
		    const struct cl_attr *attr, unsigned int valid);
int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
		       struct ost_lvb *lvb);
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
			   ldlm_iterator_t iter, void *data);
int osc_object_prune(const struct lu_env *env, struct cl_object *obj);

/* osc_request.c */
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd);
int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg);
int osc_precleanup_common(struct obd_device *obd);
int osc_cleanup_common(struct obd_device *obd);
int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
		       u32 keylen, void *key, u32 vallen, void *val,
		       struct ptlrpc_request_set *set);
int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				 struct hlist_node *hnode, void *arg);
int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
		  struct obd_device *obd, struct obd_uuid *cluuid,
		  struct obd_connect_data *data, void *localdata);
int osc_disconnect(struct obd_export *exp);
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie);
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
		       obd_enqueue_update_f upcall, void *cookie, int mode);
void osc_update_next_shrink(struct client_obd *cli);
void osc_schedule_grant_work(void);

/* osc_io.c */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
		  enum cl_req_type crt, struct cl_2queue *queue);
int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb);
void osc_io_extent_release(const struct lu_env *env,
			   const struct cl_io_slice *ios);
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios);
void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice);
int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice);
int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice);
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice);
int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio);
void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice);
void osc_read_ahead_release(const struct lu_env *env, struct cl_read_ahead *ra);
int osc_io_lseek_start(const struct lu_env *env,
		       const struct cl_io_slice *slice);
void osc_io_lseek_end(const struct lu_env *env,
		      const struct cl_io_slice *slice);
int osc_io_lru_reserve(const struct lu_env *env, const struct cl_io_slice *ios,
		       loff_t pos, size_t count);
int osc_punch_start(const struct lu_env *env, struct cl_io *io,
		    struct cl_object *obj);

/* osc_lock.c */
void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,
			  int force);
void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
			   struct osc_lock *oscl);
int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
			  struct osc_lock *oscl);
void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
			 struct cl_object *obj, struct osc_lock *oscl);
void osc_lock_set_reader(const struct lu_env *env, const struct cl_io *io,
			 struct cl_object *obj, struct osc_lock *oscl);
int osc_lock_print(const struct lu_env *env, void *cookie,
		   lu_printer_t p, const struct cl_lock_slice *slice);
void osc_lock_cancel(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);

/* Accessors and type conversions. */
static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
{
	struct osc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &osc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct osc_session *osc_env_session(const struct lu_env *env)
{
	struct osc_session *ses;

	ses = lu_context_key_get(env->le_ses, &osc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct osc_io *osc_env_io(const struct lu_env *env)
{
	return &osc_env_session(env)->os_io;
}

static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
{
	return container_of_safe(d, struct osc_device, osc_cl.cd_lu_dev);
}

static inline struct obd_export *osc_export(const struct osc_object *obj)
{
	return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->osc_exp;
}

static inline struct client_obd *osc_cli(const struct osc_object *obj)
{
	return &osc_export(obj)->exp_obd->u.cli;
}

static inline struct osc_object *cl2osc(const struct cl_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl);
}

static inline struct cl_object *osc2cl(const struct osc_object *obj)
{
	return (struct cl_object *)&obj->oo_cl;
}

static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
{
	return container_of_safe(obd->obd_lu_dev, struct osc_device,
				 osc_cl.cd_lu_dev);
}

static inline struct lu_device *osc2lu_dev(struct osc_device *osc)
{
	return &osc->osc_cl.cd_lu_dev;
}

static inline struct lu_object *osc2lu(struct osc_object *osc)
{
	return &osc->oo_cl.co_lu;
}

static inline struct osc_object *lu2osc(const struct lu_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl.co_lu);
}

static inline struct osc_io *cl2osc_io(const struct lu_env *env,
				       const struct cl_io_slice *slice)
{
	struct osc_io *oio = container_of(slice, struct osc_io, oi_cl);

	LINVRNT(oio == osc_env_io(env));
	return oio;
}

static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
	LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
	if (mode == CLM_READ)
		return LCK_PR;
	if (mode == CLM_WRITE)
		return LCK_PW;
	return LCK_GROUP;
}

static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
	LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
	if (mode == LCK_PR)
		return CLM_READ;
	if (mode == LCK_PW)
		return CLM_WRITE;
	return CLM_GROUP;
}

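/*
 * The two converters above are mutually inverse on the modes they
 * accept; a round-trip sketch (hypothetical helper):
 */
static inline enum cl_lock_mode osc_example_mode_roundtrip(enum cl_lock_mode m)
{
	/* CLM_READ <-> LCK_PR, CLM_WRITE <-> LCK_PW, CLM_GROUP <-> LCK_GROUP */
	return osc_ldlm2cl_lock(osc_cl_lock2ldlm(m));
}
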
static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
{
	return container_of_safe(slice, struct osc_page, ops_cl);
}

static inline struct osc_page *oap2osc(struct osc_async_page *oap)
{
	return container_of_safe(oap, struct osc_page, ops_oap);
}

static inline pgoff_t osc_index(struct osc_page *opg)
{
	return opg->ops_oap.oap_obj_off >> PAGE_SHIFT;
}

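/*
 * The page index is simply the object byte offset scaled by PAGE_SHIFT;
 * e.g. with 4 KiB pages, an oap_obj_off of 0x3000 yields index 3. The
 * inverse mapping (hypothetical helper):
 */
static inline loff_t osc_example_index2off(pgoff_t index)
{
	return (loff_t)index << PAGE_SHIFT;
}
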
static inline struct osc_object *osc_page_object(struct osc_page *ops)
{
	return ops->ops_oap.oap_obj;
}

static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
	return oap2osc(oap)->ops_cl.cpl_page;
}

static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
{
	return oap2osc(oap);
}

static inline struct osc_page *
osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
{
	const struct cl_page_slice *slice;

	LASSERT(osc != NULL);
	slice = cl_object_page_slice(&osc->oo_cl, page);
	return cl2osc_page(slice);
}

static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
	return container_of_safe(slice, struct osc_lock, ols_cl);
}

static inline int osc_io_srvlock(struct osc_io *oio)
{
	return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
}

enum osc_extent_state {
	OES_INV		= 0, /** extent is just initialized or destroyed */
	OES_ACTIVE	= 1, /** process is using this extent */
	OES_CACHE	= 2, /** extent is ready for IO */
	OES_LOCKING	= 3, /** locking page to prepare IO */
	OES_LOCK_DONE	= 4, /** locking finished, ready to send */
	OES_RPC		= 5, /** in RPC */
	OES_TRUNC	= 6, /** being truncated */
	OES_STATE_MAX
};

/**
 * osc_extent data to manage dirty pages.
 * osc_extent has the following attributes:
 * 1. all pages in the same extent must be in one RPC in write back;
 * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
 * 3. must be covered by only 1 osc_lock;
 * 4. exclusive. It's impossible to have overlapping osc_extents.
 *
 * The lifetime of an extent is from when the 1st page is dirtied to when
 * all pages inside it are written out.
 *
 * LOCKING ORDER
 * =============
 * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
 */
struct osc_extent {
	/** red-black tree node */
	struct rb_node		oe_node;
	/** osc_object of this extent */
	struct osc_object	*oe_obj;
	/** refcount, removed from red-black tree if reaches zero. */
	struct kref		oe_refc;
	/** busy if non-zero */
	atomic_t		oe_users;
	/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
	struct list_head	oe_link;
	/** state of this extent */
	enum osc_extent_state	oe_state;
	/** flags for this extent. */
	/** 0 is write, 1 is read */
	unsigned int		oe_rw:1,
	/** sync extent, queued by osc_queue_sync_pages() */
				oe_sync:1,
	/** set if this extent has partial, sync pages.
	 * Extents with partial page(s) can't merge with others in RPC
	 */
				oe_no_merge:1,
	/** an ACTIVE extent is going to be truncated, so when this extent
	 * is released, it will turn into TRUNC state instead of CACHE.
	 */
				oe_trunc_pending:1,
	/** this extent should be written asap and someone may wait for the
	 * write to finish. This bit is usually set along with urgent if
	 * the extent was in CACHE state.
	 * An fsync_wait extent can't be merged because a new extent region
	 * may exceed the fsync range.
	 */
				oe_fsync_wait:1,
	/** covering lock is being canceled */
				oe_hp:1,
	/** this extent should be written back asap. set if one of pages is
	 * called by page WB daemon, or sync write or reading requests.
	 */
				oe_urgent:1,
	/** Non-delay RPC should be used for this extent. */
				oe_ndelay:1,
	/** direct IO pages */
				oe_dio:1,
	/** this extent consists of pages that are not directly accessible */
				oe_is_rdma_only:1;
	/** how many grants allocated for this extent.
	 * There is no grant allocated for reading extents and sync write
	 * extents.
	 */
	unsigned int		oe_grants;
	/** # of dirty pages in this extent */
	unsigned int		oe_nr_pages;
	/** list of pending oap pages. Pages in this list are NOT sorted. */
	struct list_head	oe_pages;
	/** start and end index of this extent, include start and end
	 * themselves. Page offset here is the page index of osc_pages.
	 * oe_start is used as keyword for red-black tree.
	 */
	pgoff_t			oe_start;
	pgoff_t			oe_end;
	/** maximum ending index of this extent, this is limited by
	 * max_pages_per_rpc, lock extent and chunk size.
	 */
	pgoff_t			oe_max_end;
	/** waitqueue - for those who want to be notified if this extent's
	 * state has changed.
	 */
	wait_queue_head_t	oe_waitq;
	/** lock covering this extent */
	struct ldlm_lock	*oe_dlmlock;
	/** terminator of this extent. Must be set (non-NULL) while this
	 * extent is in IO.
	 */
	struct task_struct	*oe_owner;
	/** return value of writeback. If somebody is waiting for this extent,
	 * this value can be known by the outside world.
	 */
	int			oe_rc;
	/** max pages per rpc when this extent was created */
	unsigned int		oe_mppr;
	/** FLR: layout version when this osc_extent is published */
	__u32			oe_layout_version;
};

#endif /* LUSTRE_OSC_H */