/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_osc.h
 *
 * OSC layer structures and methods common for both OSC and MDC.
 *
 * This file contains OSC interfaces used by OSC and MDC. Most of them
 * were just moved from lustre/osc/osc_cl_internal.h for Data-on-MDT
 * purposes.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#ifndef LUSTRE_OSC_H
#define LUSTRE_OSC_H

#include <libcfs/libcfs.h>
#include <obd.h>
#include <cl_object.h>
#include <lustre_crypto.h>
struct osc_quota_info {
	/** linkage for quota hash table */
	struct hlist_node	oqi_hash;
	u32			oqi_id;
};
enum async_flags {
	ASYNC_READY = 0x1,	  /* ap_make_ready will not be called before
				   * this page is added to an rpc */
	ASYNC_URGENT = 0x2,	  /* page must be put into an RPC before
				   * return */
	ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called to
				   * give the caller a chance to update or
				   * cancel the size of the io */
	ASYNC_HP = 0x10,
};
struct osc_async_page {
	unsigned short		oap_cmd;

	struct list_head	oap_pending_item;
	struct list_head	oap_rpc_item;

	loff_t			oap_obj_off;
	unsigned		oap_page_off;
	enum async_flags	oap_async_flags;

	struct brw_page		oap_brw_page;

	struct ptlrpc_request	*oap_request;
	struct client_obd	*oap_cli;
	struct osc_object	*oap_obj;

	spinlock_t		oap_lock;
};
#define oap_page	oap_brw_page.pg
#define oap_count	oap_brw_page.count
#define oap_brw_flags	oap_brw_page.flag
static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
{
	return container_of(pga, struct osc_async_page, oap_brw_page);
}
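
/*
 * Illustrative sketch (not part of the original header): the brw_page is
 * embedded in the osc_async_page, so brw_page2oap() and the oap_* macros
 * above are two views of the same storage:
 *
 *	struct osc_async_page *oap = ...;
 *	struct brw_page *pga = &oap->oap_brw_page;
 *
 *	LASSERT(brw_page2oap(pga) == oap);
 *	oap->oap_count = PAGE_SIZE;	(same field as pga->count)
 */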
struct osc_device {
	struct cl_device	od_cl;
	struct obd_export	*od_exp;

	/* Write stats are actually protected by client_obd's lock. */
	struct osc_stats {
		uint64_t	os_lockless_writes;    /* by bytes */
		uint64_t	os_lockless_reads;     /* by bytes */
		uint64_t	os_lockless_truncates; /* by times */
	} od_stats;

	/* configuration item(s) */
	time64_t		od_contention_time;
	int			od_lockless_truncate;
};
/**
 * State maintained by osc layer for each IO context.
 */
struct osc_io {
	/** super class */
	struct cl_io_slice	oi_cl;
	/** true if this io is lockless. */
	unsigned int		oi_lockless:1,
	/** true if this io is counted as active IO */
				oi_is_active:1,
	/** true if this io has CAP_SYS_RESOURCE */
				oi_cap_sys_resource:1;
	/** how many LRU pages are reserved for this IO */
	unsigned long		oi_lru_reserved;

	/** active extents: we know how many bytes are going to be written,
	 * so having an active extent will prevent it from being fragmented */
	struct osc_extent	*oi_active;
	/** partially truncated extent: we need to hold this extent to
	 * prevent page writeback from happening. */
	struct osc_extent	*oi_trunc;
	/** write osc_lock for this IO, used by osc_extent_find(). */
	struct osc_lock		*oi_write_osclock;
	struct obdo		oi_oa;
	struct osc_async_cbargs {
		bool		  opc_rpc_sent;
		int		  opc_rc;
		struct completion opc_sync;
	} oi_cbarg;
};
/**
 * State maintained by osc layer for the duration of a system call.
 */
struct osc_session {
	struct osc_io os_io;
};

#define OTI_PVEC_SIZE 256
struct osc_thread_info {
	struct ldlm_res_id	oti_resname;
	union ldlm_policy_data	oti_policy;
	struct cl_attr		oti_attr;
	struct cl_io		oti_io;
	struct pagevec		oti_pagevec;
	void			*oti_pvec[OTI_PVEC_SIZE];
	/**
	 * Fields used by cl_lock_discard_pages().
	 */
	pgoff_t			oti_next_index;
	pgoff_t			oti_fn_index; /* first non-overlapped index */
	struct cl_sync_io	oti_anchor;
	struct cl_req_attr	oti_req_attr;
	struct lu_buf		oti_ladvise_buf;
};
static inline __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
	__u64 result = 0;

	CDEBUG(D_DLMTRACE, "flags: %x\n", enqflags);

	LASSERT((enqflags & ~CEF_MASK) == 0);

	if (enqflags & CEF_NONBLOCK)
		result |= LDLM_FL_BLOCK_NOWAIT;
	if (enqflags & CEF_GLIMPSE)
		result |= LDLM_FL_HAS_INTENT;
	if (enqflags & CEF_DISCARD_DATA)
		result |= LDLM_FL_AST_DISCARD_DATA;
	if (enqflags & CEF_PEEK)
		result |= LDLM_FL_TEST_LOCK;
	if (enqflags & CEF_LOCK_MATCH)
		result |= LDLM_FL_MATCH_LOCK;
	if (enqflags & CEF_LOCK_NO_EXPAND)
		result |= LDLM_FL_NO_EXPANSION;
	if (enqflags & CEF_SPECULATIVE)
		result |= LDLM_FL_SPECULATIVE;
	return result;
}
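
/*
 * Example (illustrative): a non-blocking glimpse enqueue translates as
 *
 *	__u64 flags = osc_enq2ldlm_flags(CEF_NONBLOCK | CEF_GLIMPSE);
 *
 * which yields LDLM_FL_BLOCK_NOWAIT | LDLM_FL_HAS_INTENT.
 */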
typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
				    int rc);
struct osc_enqueue_args {
	struct obd_export	*oa_exp;
	enum ldlm_type		oa_type;
	enum ldlm_mode		oa_mode;
	__u64			*oa_flags;
	osc_enqueue_upcall_f	oa_upcall;
	void			*oa_cookie;
	struct ost_lvb		*oa_lvb;
	struct lustre_handle	oa_lockh;
	bool			oa_speculative;
};
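
/*
 * A minimal sketch of an osc_enqueue_upcall_f (hypothetical callback, shown
 * only to illustrate the contract; the real implementations live in
 * osc_lock.c, e.g. osc_lock_upcall()):
 *
 *	static int example_upcall(void *cookie, struct lustre_handle *lockh,
 *				  int rc)
 *	{
 *		struct osc_enqueue_args *args = cookie;
 *
 *		if (rc == 0)
 *			args->oa_lockh = *lockh;
 *		return rc;
 *	}
 */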
/**
 * Bit flags for osc_dlm_lock_at_pageoff().
 */
enum osc_dap_flags {
	/**
	 * Just check if the desired lock exists, it won't hold a reference
	 * count on the lock.
	 */
	OSC_DAP_FL_TEST_LOCK = BIT(0),
	/**
	 * Return the lock even if it is being canceled.
	 */
	OSC_DAP_FL_CANCELING = BIT(1),
};
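
/*
 * Example (illustrative): peek at a DLM lock covering a page index without
 * pinning it, even if cancellation has already started:
 *
 *	lock = osc_dlmlock_at_pgoff(env, obj, index,
 *				    OSC_DAP_FL_TEST_LOCK |
 *				    OSC_DAP_FL_CANCELING);
 */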
/**
 * The set of operations which are different for MDC and OSC objects.
 */
struct osc_object_operations {
	void (*oto_build_res_name)(struct osc_object *osc,
				   struct ldlm_res_id *resname);
	struct ldlm_lock *(*oto_dlmlock_at_pgoff)(const struct lu_env *env,
						  struct osc_object *obj,
						  pgoff_t index,
						  enum osc_dap_flags dap_flags);
};
struct osc_object {
	struct cl_object	oo_cl;
	struct lov_oinfo	*oo_oinfo;
	/**
	 * True if locking against this stripe got -EUSERS.
	 */
	int			oo_contended;
	ktime_t			oo_contention_time;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
	/**
	 * IO context used for invariant checks in osc_lock_has_pages().
	 */
	struct cl_io		oo_debug_io;
	/** Serialization object for osc_object::oo_debug_io. */
	struct mutex		oo_debug_mutex;
#endif
	/**
	 * used by the osc to keep track of what objects to build into rpcs.
	 * Protected by client_obd->cli_loi_list_lock.
	 */
	struct list_head	oo_ready_item;
	struct list_head	oo_hp_ready_item;
	struct list_head	oo_write_item;
	struct list_head	oo_read_item;

	/**
	 * extent is a red black tree to manage (async) dirty pages.
	 */
	struct rb_root		oo_root;
	/**
	 * Manage write (dirty) extents.
	 */
	struct list_head	oo_hp_exts;	/* list of hp extents */
	struct list_head	oo_urgent_exts;	/* list of writeback extents */
	struct list_head	oo_full_exts;

	struct list_head	oo_reading_exts;

	atomic_t		oo_nr_reads;
	atomic_t		oo_nr_writes;

	/** Protect extent tree. Will be used to protect
	 * oo_{read|write}_pages soon. */
	spinlock_t		oo_lock;

	/**
	 * Radix tree for caching pages
	 */
	spinlock_t		oo_tree_lock;
	struct radix_tree_root	oo_tree;
	unsigned long		oo_npages;

	/* Protect osc_lock this osc_object has */
	struct list_head	oo_ol_list;
	spinlock_t		oo_ol_spin;

	/** number of active IOs of this object */
	atomic_t		oo_nr_ios;
	wait_queue_head_t	oo_io_waitq;

	const struct osc_object_operations *oo_obj_ops;
};
static inline void osc_build_res_name(struct osc_object *osc,
				      struct ldlm_res_id *resname)
{
	osc->oo_obj_ops->oto_build_res_name(osc, resname);
}
static inline struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
						     struct osc_object *obj,
						     pgoff_t index,
						     enum osc_dap_flags flags)
{
	return obj->oo_obj_ops->oto_dlmlock_at_pgoff(env, obj, index, flags);
}
static inline void osc_object_lock(struct osc_object *obj)
{
	spin_lock(&obj->oo_lock);
}

static inline int osc_object_trylock(struct osc_object *obj)
{
	return spin_trylock(&obj->oo_lock);
}

static inline void osc_object_unlock(struct osc_object *obj)
{
	spin_unlock(&obj->oo_lock);
}
#define assert_osc_object_is_locked(obj)	\
	assert_spin_locked(&obj->oo_lock)
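
/*
 * Typical critical section around the extent tree (illustrative):
 *
 *	osc_object_lock(obj);
 *	... walk or modify obj->oo_root ...
 *	assert_osc_object_is_locked(obj);
 *	osc_object_unlock(obj);
 */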
static inline void osc_object_set_contended(struct osc_object *obj)
{
	obj->oo_contention_time = ktime_get();
	obj->oo_contended = 1;
}

static inline void osc_object_clear_contended(struct osc_object *obj)
{
	obj->oo_contended = 0;
}
/**
 * Lock "micro-states" for osc layer.
 */
enum osc_lock_state {
	OLS_NEW,
	OLS_ENQUEUE_WAITING,
	OLS_ENQUEUED,
	OLS_UPCALL_RECEIVED,
	OLS_GRANTED,
	OLS_CANCELLED
};
/**
 * osc-private state of cl_lock.
 *
 * Interaction with DLM.
 *
 * Once the receive upcall is invoked, osc_lock remembers a handle of DLM lock
 * in osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
 *
 * This pointer is protected through a reference, acquired by
 * osc_lock_upcall0(). Also, an additional reference is acquired by
 * ldlm_lock_addref() call protecting the lock from cancellation, until
 * osc_lock_unuse() releases it.
 *
 * Below is a description of how lock references are acquired and released
 * inside of DLM.
 *
 * - When new lock is created and enqueued to the server (ldlm_cli_enqueue())
 *      - ldlm_lock_create()
 *          - ldlm_lock_new(): initializes a lock with 2 references. One for
 *            the caller (released when reply from the server is received, or
 *            on error), and another for the hash table.
 *      - ldlm_lock_addref_internal(): protects the lock from cancellation.
 *
 * - When reply is received from the server (osc_enqueue_interpret())
 *      - ldlm_cli_enqueue_fini()
 *          - LDLM_LOCK_PUT(): releases caller reference acquired by
 *            ldlm_lock_new().
 *          - if (rc != 0)
 *                ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
 *      - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
 *
 * - When lock is being cancelled (ldlm_lock_cancel())
 *      - ldlm_lock_destroy()
 *          - LDLM_LOCK_PUT(): releases hash-table reference acquired by
 *            ldlm_lock_new().
 *
 * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called
 * either when lock is cancelled (osc_lock_blocking()), or when the lock is
 * deleted without cancellation (e.g., from cl_locks_prune()). In the latter
 * case the ldlm lock remains in memory, and can be re-attached to osc_lock
 * in the future.
 */
struct osc_lock {
	struct cl_lock_slice	ols_cl;
	/** Internal lock to protect states, etc. */
	spinlock_t		ols_lock;
	/** Owner sleeps on this channel for state change */
	struct cl_sync_io	*ols_owner;
	/** waiting list for this lock to be cancelled */
	struct list_head	ols_waiting_list;
	/** wait entry of ols_waiting_list */
	struct list_head	ols_wait_entry;
	/** list entry for osc_object::oo_ol_list */
	struct list_head	ols_nextlock_oscobj;

	/** underlying DLM lock */
	struct ldlm_lock	*ols_dlmlock;
	/** DLM flags with which osc_lock::ols_lock was enqueued */
	__u64			ols_flags;
	/** osc_lock::ols_lock handle */
	struct lustre_handle	ols_handle;
	struct ldlm_enqueue_info ols_einfo;
	enum osc_lock_state	ols_state;
	/** lock value block */
	struct ost_lvb		ols_lvb;
	/** Lockless operations to be used by lockless lock */
	const struct cl_lock_operations *ols_lockless_ops;
	/**
	 * true, if ldlm_lock_addref() was called against
	 * osc_lock::ols_lock. This is used for sanity checking.
	 *
	 * \see osc_lock::ols_has_ref
	 */
	unsigned		ols_hold :1,
	/**
	 * this is much like osc_lock::ols_hold, except that this bit is
	 * cleared _after_ the reference is released in osc_lock_unuse().
	 * This fine distinction is needed because:
	 *
	 *     - if the ldlm lock still has a reference, osc_ast_data_get()
	 *       needs to return the associated cl_lock (so that a flag is
	 *       needed that is cleared after ldlm_lock_decref() returned),
	 *       and
	 *
	 *     - ldlm_lock_decref() can invoke blocking ast (for a
	 *       LDLM_FL_CBPENDING lock), and osc_lock functions like
	 *       osc_lock_cancel() called from there need to know whether to
	 *       release lock reference (so that a flag is needed that is
	 *       cleared before ldlm_lock_decref() is called).
	 */
				ols_has_ref:1,
	/**
	 * inherit the lockless attribute from top level cl_io.
	 * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
	 */
				ols_locklessable:1,
	/**
	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
	 * the EVAVAIL error as tolerable; this makes the upper logic happy
	 * to wait for all glimpse locks to all OSTs to be completed.
	 * A glimpse lock converts to a normal lock if the server lock is
	 * granted.
	 * A glimpse lock should be destroyed immediately after use.
	 */
				ols_glimpse:1,
	/**
	 * For async glimpse lock.
	 */
				ols_agl:1,
	/**
	 * for speculative locks - asynchronous glimpse locks and ladvise
	 * lockahead manual lock requests
	 *
	 * Used to tell osc layer to not wait for the ldlm reply from the
	 * server, so the osc lock will be short lived - it only exists to
	 * create the ldlm request and is not updated on request completion.
	 */
				ols_speculative:1;
};
static inline int osc_lock_is_lockless(const struct osc_lock *ols)
{
	return (ols->ols_cl.cls_ops == ols->ols_lockless_ops);
}
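
/*
 * A lock is "lockless" iff its slice was switched to the lockless operation
 * vector, so callers can branch on it (illustrative):
 *
 *	if (osc_lock_is_lockless(ols))
 *		... IO is done under server-side locking (OBD_BRW_SRVLOCK) ...
 */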
/**
 * Page state private for osc layer.
 */
struct osc_page {
	struct cl_page_slice	ops_cl;
	/**
	 * Page queues used by osc to detect when RPC can be formed.
	 */
	struct osc_async_page	ops_oap;
	/**
	 * An offset within page from which next transfer starts. This is used
	 * by cl_page_clip() to submit partial page transfers.
	 */
	unsigned int		ops_from:PAGE_SHIFT,
	/**
	 * An offset within page at which next transfer ends (inclusive).
	 *
	 * \see osc_page::ops_from.
	 */
				ops_to:PAGE_SHIFT,
	/**
	 * Boolean, true iff page is under transfer. Used for sanity checking.
	 */
				ops_transfer_pinned:1,
	/**
	 * in LRU?
	 */
				ops_in_lru:1,
	/**
	 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
	 */
				ops_srvlock:1,
	/**
	 * If the page is in osc_object::oo_tree.
	 */
				ops_intree:1;
	/**
	 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
	 */
	struct list_head	ops_lru;
	/**
	 * Submit time - the time when the page is starting RPC. For debugging.
	 */
	ktime_t			ops_submit_time;
};
struct osc_brw_async_args {
	struct obdo		*aa_oa;
	int			aa_requested_nob;
	int			aa_nio_count;
	u32			aa_page_count;
	s32			aa_resends;
	struct brw_page		**aa_ppga;
	struct client_obd	*aa_cli;
	struct list_head	aa_oaps;
	struct list_head	aa_exts;
};
extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct kmem_cache *osc_quota_kmem;
extern struct kmem_cache *osc_obdo_kmem;

extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;
#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)

int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t ind);
void osc_index2policy(union ldlm_policy_data *policy,
		      const struct cl_object *obj, pgoff_t start, pgoff_t end);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags);
int lru_queue_work(const struct lu_env *env, void *data);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
		    long target, bool force);

int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
			u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
			struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
		       struct osc_page *ops, cl_commit_cbt cb);
int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
		       struct cl_io *io, cl_commit_cbt cb);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
			    struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
			 struct osc_page *ops);
int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
			 struct osc_object *obj, struct list_head *list,
			 int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
			     __u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
			      pgoff_t start, pgoff_t end, int hp, int discard);
int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
			 pgoff_t start, pgoff_t end);
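
/*
 * Illustrative fsync-like sequence over a whole object (assumed call
 * pattern, simplified; CL_PAGE_EOF is the "no upper bound" page index):
 *
 *	rc = osc_cache_writeback_range(env, obj, 0, CL_PAGE_EOF, 0, 0);
 *	if (rc >= 0)
 *		rc = osc_cache_wait_range(env, obj, 0, CL_PAGE_EOF);
 */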
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
		   struct osc_object *osc, int async);

static inline void osc_wake_cache_waiters(struct client_obd *cli)
{
	wake_up(&cli->cl_cache_waiters);
}
static inline int osc_io_unplug_async(const struct lu_env *env,
				      struct client_obd *cli,
				      struct osc_object *osc)
{
	return osc_io_unplug0(env, cli, osc, 1);
}

static inline void osc_io_unplug(const struct lu_env *env,
				 struct client_obd *cli,
				 struct osc_object *osc)
{
	(void)osc_io_unplug0(env, cli, osc, 0);
}
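
/*
 * Illustrative: the two wrappers differ only in the async argument given to
 * osc_io_unplug0() - 1 defers RPC formation, 0 attempts it in the calling
 * context:
 *
 *	osc_io_unplug_async(env, cli, osc);	(cannot block on RPC formation)
 *	osc_io_unplug(env, cli, osc);		(forms RPCs right away)
 */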
typedef bool (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
				  struct osc_page *, void *);
bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
			  struct osc_object *osc, pgoff_t start, pgoff_t end,
			  osc_page_gang_cbt cb, void *cbdata);
bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
		    struct osc_page *ops, void *cbdata);
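
/*
 * A hypothetical osc_page_gang_cbt callback (sketch): count the pages in a
 * range; returning false is assumed to stop the walk early.
 *
 *	static bool count_page_cb(const struct lu_env *env, struct cl_io *io,
 *				  struct osc_page *ops, void *cbdata)
 *	{
 *		(*(unsigned long *)cbdata)++;
 *		return true;
 *	}
 *
 *	unsigned long nr = 0;
 *	osc_page_gang_lookup(env, io, osc, start, end, count_page_cb, &nr);
 */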
int osc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *osc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *osc_device_free(const struct lu_env *env,
				  struct lu_device *d);

int osc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void osc_object_free(const struct lu_env *env, struct lu_object *obj);
int osc_lvb_print(const struct lu_env *env, void *cookie,
		  lu_printer_t p, const struct ost_lvb *lvb);
int osc_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t p, const struct lu_object *obj);
int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
		 struct cl_attr *attr);
int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
		    const struct cl_attr *attr, unsigned valid);
int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
		       struct ost_lvb *lvb);
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
int osc_object_is_contended(struct osc_object *obj);
int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
			   ldlm_iterator_t iter, void *data);
int osc_object_prune(const struct lu_env *env, struct cl_object *obj);

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd);
int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg);
int osc_precleanup_common(struct obd_device *obd);
int osc_cleanup_common(struct obd_device *obd);
int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
		       u32 keylen, void *key, u32 vallen, void *val,
		       struct ptlrpc_request_set *set);
int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				 struct hlist_node *hnode, void *arg);
int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
		  struct obd_device *obd, struct obd_uuid *cluuid,
		  struct obd_connect_data *data, void *localdata);
int osc_disconnect(struct obd_export *exp);
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie);
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
		  enum cl_req_type crt, struct cl_2queue *queue);
int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb);
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios);
int osc_io_rw_iter_init(const struct lu_env *env,
			const struct cl_io_slice *ios);
void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice);
int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice);
int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice);
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice);
int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio);
void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice);
void osc_read_ahead_release(const struct lu_env *env, void *cbdata);

void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,
			  int force);
void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
			   struct osc_lock *oscl);
int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
			  struct osc_lock *oscl);
void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
			 struct cl_object *obj, struct osc_lock *oscl);
int osc_lock_print(const struct lu_env *env, void *cookie,
		   lu_printer_t p, const struct cl_lock_slice *slice);
void osc_lock_cancel(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
/*****************************************************************************
 *
 * Accessors and type conversions.
 *
 */
static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
{
	struct osc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &osc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct osc_session *osc_env_session(const struct lu_env *env)
{
	struct osc_session *ses;

	ses = lu_context_key_get(env->le_ses, &osc_session_key);
	LASSERT(ses != NULL);
	return ses;
}
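
/*
 * Illustrative: per-thread scratch buffers are fetched through the lu_env
 * keys rather than allocated on the stack:
 *
 *	struct osc_thread_info *info = osc_env_info(env);
 *
 *	osc_index2policy(&info->oti_policy, osc2cl(osc), start, end);
 */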
static inline struct osc_io *osc_env_io(const struct lu_env *env)
{
	return &osc_env_session(env)->os_io;
}

static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
{
	return container_of_safe(d, struct osc_device, od_cl.cd_lu_dev);
}

static inline struct obd_export *osc_export(const struct osc_object *obj)
{
	return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
}

static inline struct client_obd *osc_cli(const struct osc_object *obj)
{
	return &osc_export(obj)->exp_obd->u.cli;
}

static inline struct osc_object *cl2osc(const struct cl_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl);
}

static inline struct cl_object *osc2cl(const struct osc_object *obj)
{
	return (struct cl_object *)&obj->oo_cl;
}

static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
{
	return container_of_safe(obd->obd_lu_dev, struct osc_device,
				 od_cl.cd_lu_dev);
}

static inline struct lu_device *osc2lu_dev(struct osc_device *osc)
{
	return &osc->od_cl.cd_lu_dev;
}

static inline struct lu_object *osc2lu(struct osc_object *osc)
{
	return &osc->oo_cl.co_lu;
}

static inline struct osc_object *lu2osc(const struct lu_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl.co_lu);
}
static inline struct osc_io *cl2osc_io(const struct lu_env *env,
				       const struct cl_io_slice *slice)
{
	struct osc_io *oio = container_of(slice, struct osc_io, oi_cl);

	LINVRNT(oio == osc_env_io(env));
	return oio;
}
static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
	LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
	if (mode == CLM_READ)
		return LCK_PR;
	if (mode == CLM_WRITE)
		return LCK_PW;
	return LCK_GROUP;
}

static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
	LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
	if (mode == LCK_PR)
		return CLM_READ;
	if (mode == LCK_PW)
		return CLM_WRITE;
	return CLM_GROUP;
}
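
/*
 * The two converters are inverses on the supported modes (illustrative):
 *
 *	osc_ldlm2cl_lock(osc_cl_lock2ldlm(CLM_READ))  == CLM_READ
 *	osc_ldlm2cl_lock(osc_cl_lock2ldlm(CLM_WRITE)) == CLM_WRITE
 *	osc_ldlm2cl_lock(osc_cl_lock2ldlm(CLM_GROUP)) == CLM_GROUP
 */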
static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
{
	return container_of_safe(slice, struct osc_page, ops_cl);
}

static inline struct osc_page *oap2osc(struct osc_async_page *oap)
{
	return container_of_safe(oap, struct osc_page, ops_oap);
}

static inline pgoff_t osc_index(struct osc_page *opg)
{
	return opg->ops_cl.cpl_page->cp_osc_index;
}

static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
	return oap2osc(oap)->ops_cl.cpl_page;
}

static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
{
	return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
}

static inline struct osc_page *
osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
{
	const struct cl_page_slice *slice;

	LASSERT(osc != NULL);
	slice = cl_object_page_slice(&osc->oo_cl, page);
	return cl2osc_page(slice);
}

static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
	return container_of_safe(slice, struct osc_lock, ols_cl);
}

static inline int osc_io_srvlock(struct osc_io *oio)
{
	return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
}
enum osc_extent_state {
	OES_INV		= 0, /** extent is just initialized or destroyed */
	OES_ACTIVE	= 1, /** process is using this extent */
	OES_CACHE	= 2, /** extent is ready for IO */
	OES_LOCKING	= 3, /** locking page to prepare IO */
	OES_LOCK_DONE	= 4, /** locking finished, ready to send */
	OES_RPC		= 5, /** in RPC */
	OES_TRUNC	= 6, /** being truncated */
	OES_STATE_MAX
};
/**
 * osc_extent data to manage dirty pages.
 * osc_extent has the following attributes:
 * 1. all pages in the same extent must be in one RPC in write back;
 * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
 * 3. must be covered by only 1 osc_lock;
 * 4. exclusive. It's impossible to have overlapped osc_extents.
 *
 * The lifetime of an extent is from when the 1st page is dirtied to when
 * all pages inside it are written out.
 *
 * LOCKING ORDER
 * =============
 * page lock -> client_obd_list_lock -> object lock (osc_object::oo_lock)
 */
struct osc_extent {
	/** red-black tree node */
	struct rb_node		oe_node;
	/** osc_object of this extent */
	struct osc_object	*oe_obj;
	/** refcount, removed from red-black tree if reaches zero. */
	struct kref		oe_refc;
	/** busy if non-zero */
	atomic_t		oe_users;
	/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
	struct list_head	oe_link;
	/** state of this extent */
	enum osc_extent_state	oe_state;
	/** flags for this extent. */
	/** 0 is write, 1 is read */
	unsigned int		oe_rw:1,
	/** sync extent, queued by osc_queue_sync_pages() */
				oe_sync:1,
	/** set if this extent has partial, sync pages.
	 * Extents with partial page(s) can't merge with others in RPC */
				oe_no_merge:1,
	/** an ACTIVE extent is going to be truncated, so when this extent
	 * is released, it will turn into TRUNC state instead of CACHE. */
				oe_trunc_pending:1,
	/** this extent should be written asap and someone may wait for the
	 * write to finish. This bit is usually set along with urgent if
	 * the extent was in CACHE state.
	 * An fsync_wait extent can't be merged because a new extent region
	 * may exceed the fsync range. */
				oe_fsync_wait:1,
	/** covering lock is being canceled */
				oe_hp:1,
	/** this extent should be written back asap. Set if one of the pages
	 * is picked up by the page WB daemon, or by sync write or read
	 * requests. */
				oe_urgent:1,
	/** Non-delay RPC should be used for this extent. */
				oe_ndelay:1,
	/** direct IO pages */
				oe_dio:1,
	/** this extent consists of RDMA only pages */
				oe_is_rdma_only:1;
	/** Grant allocated for this extent. There is no grant allocated
	 * for reading extents and sync write extents. */
	unsigned int		oe_grants;
	/** # of dirty pages in this extent */
	unsigned int		oe_nr_pages;
	/** list of pending oap pages. Pages in this list are NOT sorted. */
	struct list_head	oe_pages;
	/** start and end index of this extent, include start and end
	 * themselves. Page offset here is the page index of osc_pages.
	 * oe_start is used as keyword for red-black tree. */
	pgoff_t			oe_start;
	pgoff_t			oe_end;
	/** maximum ending index of this extent, this is limited by
	 * max_pages_per_rpc, lock extent and chunk size. */
	pgoff_t			oe_max_end;
	/** waitqueue - for those who want to be notified if this extent's
	 * state has changed. */
	wait_queue_head_t	oe_waitq;
	/** lock covering this extent */
	struct ldlm_lock	*oe_dlmlock;
	/** terminator of this extent. Must be true if this extent is in IO. */
	struct task_struct	*oe_owner;
	/** return value of writeback. If somebody is waiting for this extent,
	 * this value can be known by the outside world. */
	int			oe_rc;
	/** max pages per rpc when this extent was created */
	unsigned int		oe_mppr;
	/** FLR: layout version when this osc_extent was published */
	__u32			oe_layout_version;
};
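
/*
 * Illustrative, simplified life cycle of a write-back extent (derived from
 * the state comments above, not a normative diagram):
 *
 *	OES_ACTIVE --(all pages added)--> OES_CACHE
 *	OES_CACHE --(RPC formation)--> OES_LOCKING --> OES_LOCK_DONE
 *	OES_LOCK_DONE --(request sent)--> OES_RPC --(reply)--> OES_INV
 *
 * with OES_TRUNC entered while truncate works on the extent.
 */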
#endif /* LUSTRE_OSC_H */