/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
/*
 * lustre/include/lustre_osc.h
 *
 * OSC layer structures and methods common for both OSC and MDC.
 *
 * This file contains OSC interfaces used by OSC and MDC. Most of them
 * were just moved from lustre/osc/osc_cl_internal.h for Data-on-MDT
 * purposes.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#ifndef LUSTRE_OSC_H
#define LUSTRE_OSC_H

#include <libcfs/libcfs.h>
#include <obd.h>
#include <cl_object.h>
#include <lustre_crypto.h>

struct osc_quota_info {
	/** linkage for quota hash table */
	struct hlist_node	oqi_hash;
	/** quota identifier */
	u32			oqi_id;
};

enum oap_async_flags {
	ASYNC_READY	   = 0x1, /* ap_make_ready will not be called before
				   * this page is added to an rpc */
	ASYNC_URGENT	   = 0x2, /* page must be put into RPC before return */
	ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
				   * to give the caller a chance to update
				   * or cancel the size of the io */
	ASYNC_HP	   = 0x8,
	OAP_ASYNC_MAX,
	OAP_ASYNC_BITS	   = 4,	  /* width of the oap_async_flags bitfield */
};

/* Add explicit padding to keep the fields aligned despite "packed";
 * this is needed so they pack cleanly with the following fields in
 * osc_page. */
#define OAP_PAD_BITS (16 - OBD_BRW_WRITE - OAP_ASYNC_BITS)
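
/* Worked example (illustrative; OBD_BRW_WRITE == 0x2 and OAP_ASYNC_BITS == 4
 * are assumptions about the surrounding definitions): OAP_PAD_BITS would be
 * 16 - 2 - 4 = 10, so the three bitfields below total 16 bits and, together
 * with the 16-bit oap_page_off, fill a 32-bit word exactly. */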
struct osc_async_page {
	unsigned short		oap_page_off /* :PAGE_SHIFT */;
	unsigned int		oap_cmd:OBD_BRW_WRITE;
	enum oap_async_flags	oap_async_flags:OAP_ASYNC_BITS;
	unsigned int		oap_padding1:OAP_PAD_BITS;	/* unused */
	unsigned int		oap_padding2;			/* unused */

	struct list_head	oap_pending_item;
	struct list_head	oap_rpc_item;

	/* byte offset of this page in the object, see osc_index() */
	loff_t			oap_obj_off;

	struct ptlrpc_request	*oap_request;
	struct osc_object	*oap_obj;

	struct brw_page		oap_brw_page;
} __attribute__((packed));

#define oap_page	oap_brw_page.pg
#define oap_count	oap_brw_page.count
#define oap_brw_flags	oap_brw_page.flag

static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
{
	BUILD_BUG_ON(OAP_ASYNC_MAX - 1 >= (1 << OAP_ASYNC_BITS));
	return container_of(pga, struct osc_async_page, oap_brw_page);
}
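
/*
 * Illustrative sketch (not part of the original interface): RPC completion
 * code that walks an array of brw_page pointers can recover the enclosing
 * osc_async_page, and through the aliases above its page and transfer count;
 * the variable names here are hypothetical:
 *
 *	struct brw_page *pga = aa->aa_ppga[i];
 *	struct osc_async_page *oap = brw_page2oap(pga);
 *
 *	LASSERT(oap->oap_count == pga->count);	(aliases, same storage)
 */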

struct osc_device {
	struct cl_device	osc_cl;
	struct obd_export	*osc_exp;

	/* Write stats are actually protected by client_obd's lock. */
	struct osc_stats {
		ktime_t		os_init;
		uint64_t	os_lockless_writes;	/* by bytes */
		uint64_t	os_lockless_reads;	/* by bytes */
	} osc_stats;

	/* configuration item(s) */
	time64_t		osc_contention_time;
};

/**
 * State maintained by osc layer for each IO context.
 */
struct osc_io {
	/** super class */
	struct cl_io_slice oi_cl;
	/** true if this io is lockless. */
	unsigned int	   oi_lockless:1,
	/** true if this io is counted as active IO */
			   oi_is_active:1,
	/** true if this io has CAP_SYS_RESOURCE */
			   oi_cap_sys_resource:1,
	/** true if this io was issued by readahead */
			   oi_is_readahead:1;
	/** how many LRU pages are reserved for this IO */
	unsigned long	   oi_lru_reserved;

	/** active extents: we know how many bytes are going to be written,
	 * so having an active extent prevents it from being fragmented */
	struct osc_extent *oi_active;
	/** partially truncated extent: we need to hold this extent to
	 * prevent page writeback from happening */
	struct osc_extent *oi_trunc;
	/** write osc_lock for this IO, used by osc_extent_find() */
	struct osc_lock   *oi_write_osclock;
	struct osc_lock   *oi_read_osclock;

	struct osc_async_cbargs {
		bool		  opc_rpc_sent;
		int		  opc_rc;
		struct completion opc_sync;
	} oi_cbarg;
};

/**
 * State maintained by osc layer for the duration of a system call.
 */
struct osc_session {
	struct osc_io os_io;
};

#define OTI_PVEC_SIZE 256
struct osc_thread_info {
	struct ldlm_res_id	oti_resname;
	union ldlm_policy_data	oti_policy;
	struct cl_attr		oti_attr;
	struct cl_io		oti_io;
	struct pagevec		oti_pagevec;
	void			*oti_pvec[OTI_PVEC_SIZE];
	/**
	 * Fields used by cl_lock_discard_pages().
	 */
	pgoff_t			oti_next_index;
	pgoff_t			oti_fn_index;	/* first non-overlapped index */
	pgoff_t			oti_ng_index;	/* negative lock caching */
	struct cl_sync_io	oti_anchor;
	struct cl_req_attr	oti_req_attr;
	struct lu_buf		oti_ladvise_buf;
};

static inline __u64 osc_enq2ldlm_flags(__u32 enqflags)
{
	__u64 result = 0;

	CDEBUG(D_DLMTRACE, "flags: %x\n", enqflags);

	LASSERT((enqflags & ~CEF_MASK) == 0);

	if (enqflags & CEF_NONBLOCK)
		result |= LDLM_FL_BLOCK_NOWAIT;
	if (enqflags & CEF_GLIMPSE)
		result |= LDLM_FL_HAS_INTENT | LDLM_FL_CBPENDING;
	if (enqflags & CEF_DISCARD_DATA)
		result |= LDLM_FL_AST_DISCARD_DATA;
	if (enqflags & CEF_PEEK)
		result |= LDLM_FL_TEST_LOCK;
	if (enqflags & CEF_LOCK_MATCH)
		result |= LDLM_FL_MATCH_LOCK;
	if (enqflags & CEF_LOCK_NO_EXPAND)
		result |= LDLM_FL_NO_EXPANSION;
	if (enqflags & CEF_SPECULATIVE)
		result |= LDLM_FL_SPECULATIVE;
	return result;
}
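
/*
 * Example (illustrative): a speculative glimpse enqueue translates as
 *
 *	osc_enq2ldlm_flags(CEF_GLIMPSE | CEF_SPECULATIVE)
 *		== LDLM_FL_HAS_INTENT | LDLM_FL_CBPENDING | LDLM_FL_SPECULATIVE
 */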

typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
				    int rc);

struct osc_enqueue_args {
	struct obd_export	*oa_exp;
	enum ldlm_type		oa_type;
	enum ldlm_mode		oa_mode;
	__u64			*oa_flags;
	osc_enqueue_upcall_f	oa_upcall;
	void			*oa_cookie;
	struct ost_lvb		*oa_lvb;
	struct lustre_handle	oa_lockh;
	bool			oa_speculative;
};

/**
 * Bit flags for osc_dlmlock_at_pgoff().
 */
enum osc_dap_flags {
	/**
	 * Just check if the desired lock exists, it won't hold a reference
	 * count on the lock.
	 */
	OSC_DAP_FL_TEST_LOCK = BIT(0),
	/**
	 * Return the lock even if it is being canceled.
	 */
	OSC_DAP_FL_CANCELING = BIT(1),
	/**
	 * Check that ast data is present, requested to cancel cb.
	 */
	OSC_DAP_FL_AST	     = BIT(2),
	/**
	 * Look at the right region for the desired lock.
	 */
	OSC_DAP_FL_RIGHT     = BIT(3),
};
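
/*
 * Example (illustrative): the flags combine, e.g. a teardown path that wants
 * to see a lock even while it is being canceled, without pinning it:
 *
 *	dlmlock = osc_dlmlock_at_pgoff(env, osc, index,
 *				       OSC_DAP_FL_TEST_LOCK |
 *				       OSC_DAP_FL_CANCELING);
 */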

/**
 * The set of operations which are different for MDC and OSC objects.
 */
struct osc_object_operations {
	void (*oto_build_res_name)(struct osc_object *osc,
				   struct ldlm_res_id *resname);
	struct ldlm_lock *(*oto_dlmlock_at_pgoff)(const struct lu_env *env,
						  struct osc_object *obj,
						  pgoff_t index,
						  enum osc_dap_flags dap_flags);
};

struct osc_object {
	struct cl_object	oo_cl;
	struct lov_oinfo	*oo_oinfo;
	/**
	 * True if locking against this stripe got -EUSERS.
	 */
	int			oo_contended;
	ktime_t			oo_contention_time;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
	/**
	 * IO context used for invariant checks in osc_lock_has_pages().
	 */
	struct cl_io		oo_debug_io;
	/** Serialization object for osc_object::oo_debug_io. */
	struct mutex		oo_debug_mutex;
#endif
	/**
	 * used by the osc to keep track of what objects to build into rpcs.
	 * Protected by client_obd->cli_loi_list_lock.
	 */
	struct list_head	oo_ready_item;
	struct list_head	oo_hp_ready_item;
	struct list_head	oo_write_item;
	struct list_head	oo_read_item;

	/**
	 * extent is a red-black tree to manage (async) dirty pages.
	 */
	struct rb_root		oo_root;
	/**
	 * Manage write (dirty) extents.
	 */
	struct list_head	oo_hp_exts;	/* list of hp extents */
	struct list_head	oo_urgent_exts;	/* list of writeback extents */
	struct list_head	oo_full_exts;

	struct list_head	oo_reading_exts;

	atomic_t		oo_nr_reads;
	atomic_t		oo_nr_writes;

	/** Protect extent tree. Will be used to protect
	 * oo_{read|write}_pages soon. */
	spinlock_t		oo_lock;

	/**
	 * Radix tree for caching pages.
	 */
	spinlock_t		oo_tree_lock;
	struct radix_tree_root	oo_tree;
	unsigned long		oo_npages;

	/* Protect the list of osc_locks this osc_object has. */
	struct list_head	oo_ol_list;
	spinlock_t		oo_ol_spin;

	/** number of active IOs of this object */
	atomic_t		oo_nr_ios;
	wait_queue_head_t	oo_io_waitq;

	const struct osc_object_operations *oo_obj_ops;
};

static inline void osc_build_res_name(struct osc_object *osc,
				      struct ldlm_res_id *resname)
{
	osc->oo_obj_ops->oto_build_res_name(osc, resname);
}

static inline struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
						     struct osc_object *obj,
						     pgoff_t index,
						     enum osc_dap_flags flags)
{
	return obj->oo_obj_ops->oto_dlmlock_at_pgoff(env, obj, index, flags);
}
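
/*
 * Illustrative sketch (hypothetical function names, not from this header):
 * OSC and MDC each install their own operations table, e.g.
 *
 *	static const struct osc_object_operations osc_object_ops = {
 *		.oto_build_res_name	= osc_obj_build_res_name,
 *		.oto_dlmlock_at_pgoff	= osc_obj_dlmlock_at_pgoff,
 *	};
 *
 * so MDC objects can build FID-based resource names while OSC objects build
 * them from the stripe object id, behind the same two entry points above.
 */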

static inline void osc_object_lock(struct osc_object *obj)
{
	spin_lock(&obj->oo_lock);
}

static inline int osc_object_trylock(struct osc_object *obj)
{
	return spin_trylock(&obj->oo_lock);
}

static inline void osc_object_unlock(struct osc_object *obj)
{
	spin_unlock(&obj->oo_lock);
}

#define assert_osc_object_is_locked(obj)	\
	assert_spin_locked(&(obj)->oo_lock)
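
/*
 * Example (illustrative): the expected pattern for extent-tree updates is to
 * take the object lock around the modification, and to assert it inside
 * helpers that require the caller to hold it:
 *
 *	osc_object_lock(obj);
 *	... insert or remove an osc_extent in obj->oo_root ...
 *	osc_object_unlock(obj);
 */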

static inline void osc_object_set_contended(struct osc_object *obj)
{
	obj->oo_contention_time = ktime_get();
	obj->oo_contended = 1;
}

static inline void osc_object_clear_contended(struct osc_object *obj)
{
	obj->oo_contended = 0;
}

/**
 * Lock "micro-states" for osc layer.
 */
enum osc_lock_state {
	OLS_NEW,
	OLS_ENQUEUE_WAIT,
	OLS_UPCALL_RECEIVED,
	OLS_ENQUEUED,
	OLS_GRANTED,
	OLS_CANCELLED
};

/**
 * osc-private state of cl_lock.
 *
 * Interaction with DLM.
 *
 * Once the receive upcall is invoked, osc_lock remembers a handle of the DLM
 * lock in osc_lock::ols_handle and a pointer to that lock in
 * osc_lock::ols_dlmlock.
 *
 * This pointer is protected through a reference, acquired by
 * osc_lock_upcall0(). Also, an additional reference is acquired by
 * ldlm_lock_addref() call protecting the lock from cancellation, until
 * osc_lock_unuse() releases it.
 *
 * Below is a description of how lock references are acquired and released
 * inside of DLM.
 *
 * - When a new lock is created and enqueued to the server (ldlm_cli_enqueue())
 *   - ldlm_lock_create()
 *     - ldlm_lock_new(): initializes a lock with 2 references. One for
 *       the caller (released when reply from the server is received, or on
 *       error), and another for the hash table.
 *   - ldlm_lock_addref_internal(): protects the lock from cancellation.
 *
 * - When a reply is received from the server (osc_enqueue_interpret())
 *   - ldlm_cli_enqueue_fini()
 *     - LDLM_LOCK_PUT(): releases caller reference acquired by
 *       ldlm_lock_new().
 *     - on error: ldlm_lock_decref(): matches ldlm_cli_enqueue().
 *   - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
 *
 * - When a lock is being cancelled (ldlm_lock_cancel())
 *   - ldlm_lock_destroy()
 *     - LDLM_LOCK_PUT(): releases hash-table reference acquired by
 *       ldlm_lock_new().
 *
 * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called
 * either when the lock is cancelled (osc_lock_blocking()), or when the lock
 * is deleted without cancellation (e.g., from cl_locks_prune()). In the
 * latter case the ldlm lock remains in memory, and can be re-attached to
 * osc_lock in the future.
 */
struct osc_lock {
	struct cl_lock_slice	ols_cl;
	/** Internal lock to protect states, etc. */
	spinlock_t		ols_lock;
	/** Owner sleeps on this channel for state change */
	struct cl_sync_io	*ols_owner;
	/** waiting list for this lock to be cancelled */
	struct list_head	ols_waiting_list;
	/** wait entry of ols_waiting_list */
	struct list_head	ols_wait_entry;
	/** list entry for osc_object::oo_ol_list */
	struct list_head	ols_nextlock_oscobj;

	/** underlying DLM lock */
	struct ldlm_lock	*ols_dlmlock;
	/** DLM flags with which osc_lock::ols_lock was enqueued */
	__u64			ols_flags;
	/** osc_lock::ols_lock handle */
	struct lustre_handle	ols_handle;
	struct ldlm_enqueue_info ols_einfo;
	enum osc_lock_state	ols_state;
	/** lock value block */
	struct ost_lvb		ols_lvb;
	/** Lockless operations to be used by lockless lock */
	const struct cl_lock_operations *ols_lockless_ops;
	/**
	 * true, if ldlm_lock_addref() was called against
	 * osc_lock::ols_lock. This is used for sanity checking.
	 *
	 * \see osc_lock::ols_has_ref
	 */
	unsigned		ols_hold :1,
	/**
	 * this is much like osc_lock::ols_hold, except that this bit is
	 * cleared _after_ the reference is released in osc_lock_unuse(). This
	 * fine distinction is needed because:
	 *
	 *     - if ldlm lock still has a reference, osc_ast_data_get() needs
	 *       to return the associated cl_lock (so that a flag is needed
	 *       that is cleared after ldlm_lock_decref() returned), and
	 *
	 *     - ldlm_lock_decref() can invoke blocking ast (for a
	 *       LDLM_FL_CBPENDING lock), and osc_lock functions like
	 *       osc_lock_cancel() called from there need to know whether to
	 *       release lock reference (so that a flag is needed that is
	 *       cleared before ldlm_lock_decref() is called).
	 */
				ols_has_ref:1,
	/**
	 * inherit the lockless attribute from top level cl_io.
	 * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
	 */
				ols_locklessable:1,
	/**
	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
	 * the EVAVAIL error as tolerable; this makes the upper logic happy to
	 * wait for all glimpse locks to all OSTs to complete.
	 * A glimpse lock converts to a normal lock if the server lock is
	 * granted. A glimpse lock should be destroyed immediately after use.
	 */
				ols_glimpse:1,
	/**
	 * For async glimpse lock.
	 */
				ols_agl:1,
	/**
	 * for speculative locks - asynchronous glimpse locks and ladvise
	 * lockahead manual lock requests
	 *
	 * Used to tell the osc layer to not wait for the ldlm reply from the
	 * server, so the osc lock will be short lived - it only exists to
	 * create the ldlm request and is not updated on request completion.
	 */
				ols_speculative:1;
};

static inline int osc_lock_is_lockless(const struct osc_lock *ols)
{
	return (ols->ols_cl.cls_ops == ols->ols_lockless_ops);
}
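
/*
 * Example (illustrative): lockless-ness is encoded entirely in the lock's
 * operations vector, so callers need no extra state to test it:
 *
 *	if (osc_lock_is_lockless(olck))
 *		brw_flags |= OBD_BRW_SRVLOCK;
 */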

/**
 * Page state private for osc layer.
 */
struct osc_page {
	struct cl_page_slice	ops_cl;
	/**
	 * Page queues used by osc to detect when RPC can be formed.
	 */
	struct osc_async_page	ops_oap;
	/**
	 * An offset within page from which next transfer starts. This is used
	 * by cl_page_clip() to submit partial page transfers.
	 */
	unsigned int		ops_from:PAGE_SHIFT,
	/**
	 * An offset within page at which next transfer ends (inclusive).
	 *
	 * \see osc_page::ops_from.
	 */
				ops_to:PAGE_SHIFT,
	/**
	 * Boolean, true iff page is under transfer. Used for sanity checking.
	 */
				ops_transfer_pinned:1,
	/**
	 * True iff the page is on the LRU list.
	 */
				ops_in_lru:1,
	/**
	 * Set if the page must be transferred with OBD_BRW_SRVLOCK.
	 */
				ops_srvlock:1,
	/**
	 * If the page is in osc_object::oo_tree.
	 */
				ops_intree:1;
	/**
	 * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
	 */
	struct list_head	ops_lru;
};

struct osc_brw_async_args {
	struct obdo		*aa_oa;
	int			 aa_requested_nob;
	int			 aa_nio_count;
	u32			 aa_page_count;
	s32			 aa_resends;
	struct brw_page		**aa_ppga;
	struct client_obd	*aa_cli;
	struct list_head	 aa_oaps;
	struct list_head	 aa_exts;
};

extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_object_kmem;
extern struct kmem_cache *osc_thread_kmem;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct kmem_cache *osc_quota_kmem;
extern struct kmem_cache *osc_obdo_kmem;

extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;

#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)

/* osc_page.c */
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t ind);
void osc_index2policy(union ldlm_policy_data *policy,
		      const struct cl_object *obj, pgoff_t start, pgoff_t end);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
		     enum cl_req_type crt, int brw_flags);
int lru_queue_work(const struct lu_env *env, void *data);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
		    long target, bool force);

/* osc_cache.c */
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
			u32 async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
			struct cl_page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
		       struct osc_page *ops, cl_commit_cbt cb);
int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
		       struct cl_io *io, cl_commit_cbt cb);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
			    struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
			 struct osc_page *ops);
int osc_queue_sync_pages(const struct lu_env *env, struct cl_io *io,
			 struct osc_object *obj, struct list_head *list,
			 int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
			     __u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
			      pgoff_t start, pgoff_t end, int hp, int discard);
int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
			 pgoff_t start, pgoff_t end);
int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
		   struct osc_object *osc, int async);

static inline void osc_wake_cache_waiters(struct client_obd *cli)
{
	wake_up(&cli->cl_cache_waiters);
}

static inline int osc_io_unplug_async(const struct lu_env *env,
				      struct client_obd *cli,
				      struct osc_object *osc)
{
	return osc_io_unplug0(env, cli, osc, 1);
}

static inline void osc_io_unplug(const struct lu_env *env,
				 struct client_obd *cli,
				 struct osc_object *osc)
{
	(void)osc_io_unplug0(env, cli, osc, 0);
}
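
/*
 * Example (illustrative): callers pick the variant matching their context; a
 * path that must not block can kick write-back asynchronously:
 *
 *	(void)osc_io_unplug_async(env, osc_cli(osc), osc);
 *
 * while a synchronous flush path would call osc_io_unplug() instead.
 */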

typedef bool (*osc_page_gang_cbt)(const struct lu_env *env, struct cl_io *io,
				  void **pvec, int count, void *cbdata);
bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
			  struct osc_object *osc, pgoff_t start, pgoff_t end,
			  osc_page_gang_cbt cb, void *cbdata);
bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
		    void **pvec, int count, void *cbdata);
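
/*
 * Illustrative sketch (hypothetical callback, not part of this header): a
 * gang callback gets a vector of page slots and returns true to continue the
 * scan or false to stop it early:
 *
 *	static bool count_cb(const struct lu_env *env, struct cl_io *io,
 *			     void **pvec, int count, void *cbdata)
 *	{
 *		unsigned long *npages = cbdata;
 *
 *		*npages += count;
 *		return true;
 *	}
 *
 *	osc_page_gang_lookup(env, io, osc, start, end, count_cb, &npages);
 */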

/* osc_dev.c */
int osc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next);
struct lu_device *osc_device_fini(const struct lu_env *env,
				  struct lu_device *d);
struct lu_device *osc_device_free(const struct lu_env *env,
				  struct lu_device *d);

/* osc_object.c */
int osc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void osc_object_free(const struct lu_env *env, struct lu_object *obj);
int osc_lvb_print(const struct lu_env *env, void *cookie,
		  lu_printer_t p, const struct ost_lvb *lvb);
int osc_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t p, const struct lu_object *obj);
int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
		 struct cl_attr *attr);
int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
		    const struct cl_attr *attr, unsigned int valid);
int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
		       struct ost_lvb *lvb);
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
			   ldlm_iterator_t iter, void *data);
int osc_object_prune(const struct lu_env *env, struct cl_object *obj);

/* osc_request.c */
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd);
int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg);
int osc_precleanup_common(struct obd_device *obd);
int osc_cleanup_common(struct obd_device *obd);
int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
		       u32 keylen, void *key, u32 vallen, void *val,
		       struct ptlrpc_request_set *set);
int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				 struct hlist_node *hnode, void *arg);
int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
		  struct obd_device *obd, struct obd_uuid *cluuid,
		  struct obd_connect_data *data, void *localdata);
int osc_disconnect(struct obd_export *exp);
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
		   obd_enqueue_update_f upcall, void *cookie);
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
		       obd_enqueue_update_f upcall, void *cookie, int mode);
void osc_update_next_shrink(struct client_obd *cli);
void osc_schedule_grant_work(void);

/* osc_io.c */
int osc_io_submit(const struct lu_env *env, const struct cl_io_slice *ios,
		  enum cl_req_type crt, struct cl_2queue *queue);
int osc_io_commit_async(const struct lu_env *env,
			const struct cl_io_slice *ios,
			struct cl_page_list *qin, int from, int to,
			cl_commit_cbt cb);
void osc_io_extent_release(const struct lu_env *env,
			   const struct cl_io_slice *ios);
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
		      const struct cl_io_slice *ios);
void osc_io_rw_iter_fini(const struct lu_env *env,
			 const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_setattr_end(const struct lu_env *env,
			const struct cl_io_slice *slice);
int osc_io_read_start(const struct lu_env *env,
		      const struct cl_io_slice *slice);
int osc_io_write_start(const struct lu_env *env,
		       const struct cl_io_slice *slice);
void osc_io_end(const struct lu_env *env, const struct cl_io_slice *slice);
int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
		  struct cl_fsync_io *fio);
void osc_io_fsync_end(const struct lu_env *env,
		      const struct cl_io_slice *slice);
void osc_read_ahead_release(const struct lu_env *env,
			    struct cl_read_ahead *ra);
int osc_io_lseek_start(const struct lu_env *env,
		       const struct cl_io_slice *slice);
void osc_io_lseek_end(const struct lu_env *env,
		      const struct cl_io_slice *slice);
int osc_io_lru_reserve(const struct lu_env *env, const struct cl_io_slice *ios,
		       loff_t pos, size_t count);
int osc_punch_start(const struct lu_env *env, struct cl_io *io,
		    struct cl_object *obj);

/* osc_lock.c */
void osc_lock_to_lockless(const struct lu_env *env, struct osc_lock *ols,
			  int force);
void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
			   struct osc_lock *oscl);
int osc_lock_enqueue_wait(const struct lu_env *env, struct osc_object *obj,
			  struct osc_lock *oscl);
void osc_lock_set_writer(const struct lu_env *env, const struct cl_io *io,
			 struct cl_object *obj, struct osc_lock *oscl);
void osc_lock_set_reader(const struct lu_env *env, const struct cl_io *io,
			 struct cl_object *obj, struct osc_lock *oscl);
int osc_lock_print(const struct lu_env *env, void *cookie,
		   lu_printer_t p, const struct cl_lock_slice *slice);
void osc_lock_cancel(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);

/*****************************************************************************
 *
 * Accessors and type conversions.
 *
 */

static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
{
	struct osc_thread_info *info;

	info = lu_context_key_get(&env->le_ctx, &osc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct osc_session *osc_env_session(const struct lu_env *env)
{
	struct osc_session *ses;

	ses = lu_context_key_get(env->le_ses, &osc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct osc_io *osc_env_io(const struct lu_env *env)
{
	return &osc_env_session(env)->os_io;
}

static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
{
	return container_of_safe(d, struct osc_device, osc_cl.cd_lu_dev);
}

static inline struct obd_export *osc_export(const struct osc_object *obj)
{
	return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->osc_exp;
}

static inline struct client_obd *osc_cli(const struct osc_object *obj)
{
	return &osc_export(obj)->exp_obd->u.cli;
}

static inline struct osc_object *cl2osc(const struct cl_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl);
}

static inline struct cl_object *osc2cl(const struct osc_object *obj)
{
	return (struct cl_object *)&obj->oo_cl;
}

static inline struct osc_device *obd2osc_dev(const struct obd_device *obd)
{
	return container_of_safe(obd->obd_lu_dev, struct osc_device,
				 osc_cl.cd_lu_dev);
}

static inline struct lu_device *osc2lu_dev(struct osc_device *osc)
{
	return &osc->osc_cl.cd_lu_dev;
}

static inline struct lu_object *osc2lu(struct osc_object *osc)
{
	return &osc->oo_cl.co_lu;
}

static inline struct osc_object *lu2osc(const struct lu_object *obj)
{
	return container_of_safe(obj, struct osc_object, oo_cl.co_lu);
}

static inline struct osc_io *cl2osc_io(const struct lu_env *env,
				       const struct cl_io_slice *slice)
{
	struct osc_io *oio = container_of(slice, struct osc_io, oi_cl);

	LINVRNT(oio == osc_env_io(env));
	return oio;
}

static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
	LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
	if (mode == CLM_READ)
		return LCK_PR;
	if (mode == CLM_WRITE)
		return LCK_PW;
	return LCK_GROUP;
}

static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
	LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
	if (mode == LCK_PR)
		return CLM_READ;
	if (mode == LCK_PW)
		return CLM_WRITE;
	return CLM_GROUP;
}
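
/*
 * Example (illustrative): for the modes they accept the two helpers are
 * inverses: CLM_READ <-> LCK_PR, CLM_WRITE <-> LCK_PW, CLM_GROUP <->
 * LCK_GROUP, so osc_ldlm2cl_lock(osc_cl_lock2ldlm(CLM_WRITE)) == CLM_WRITE.
 */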

static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
{
	return container_of_safe(slice, struct osc_page, ops_cl);
}

static inline struct osc_page *oap2osc(struct osc_async_page *oap)
{
	return container_of_safe(oap, struct osc_page, ops_oap);
}

static inline pgoff_t osc_index(struct osc_page *opg)
{
	return opg->ops_oap.oap_obj_off >> PAGE_SHIFT;
}
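
/*
 * Example (illustrative): oap_obj_off is the byte offset of the page within
 * the object, so with 4KiB pages (PAGE_SHIFT == 12, an architecture-dependent
 * assumption) an offset of 0x42000 gives page index 0x42.
 */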

static inline struct osc_object *osc_page_object(struct osc_page *ops)
{
	return ops->ops_oap.oap_obj;
}

static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
	return oap2osc(oap)->ops_cl.cpl_page;
}

static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
{
	return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
}

static inline struct osc_page *
osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
{
	const struct cl_page_slice *slice;

	LASSERT(osc != NULL);
	slice = cl_object_page_slice(&osc->oo_cl, page);
	return cl2osc_page(slice);
}

static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
{
	return container_of_safe(slice, struct osc_lock, ols_cl);
}

static inline int osc_io_srvlock(struct osc_io *oio)
{
	return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
}

enum osc_extent_state {
	OES_INV       = 0, /** extent is just initialized or destroyed */
	OES_ACTIVE    = 1, /** process is using this extent */
	OES_CACHE     = 2, /** extent is ready for IO */
	OES_LOCKING   = 3, /** locking page to prepare IO */
	OES_LOCK_DONE = 4, /** locking finished, ready to send */
	OES_RPC       = 5, /** in RPC */
	OES_TRUNC     = 6, /** being truncated */
	OES_STATE_MAX
};
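
/*
 * Typical dirty-extent life cycle implied by the states above (illustrative):
 * OES_ACTIVE while an IO is adding pages, OES_CACHE once it waits for
 * write-back, then OES_LOCKING -> OES_LOCK_DONE -> OES_RPC as the RPC is
 * prepared and sent, and finally OES_INV when all pages are written out and
 * the extent is destroyed.
 */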

/**
 * osc_extent data to manage dirty pages.
 * osc_extent has the following attributes:
 * 1. all pages in the same extent must be in one RPC in write back;
 * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
 * 3. must be covered by only 1 osc_lock;
 * 4. exclusive. It's impossible to have overlapped osc_extents.
 *
 * The lifetime of an extent is from when the 1st page is dirtied to when
 * all pages inside it are written out.
 *
 * LOCKING ORDER
 * =============
 * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
 */
struct osc_extent {
	/** red-black tree node */
	struct rb_node		oe_node;
	/** osc_object of this extent */
	struct osc_object	*oe_obj;
	/** refcount, removed from red-black tree if reaches zero. */
	struct kref		oe_refc;
	/** busy if non-zero */
	atomic_t		oe_users;
	/** link list of osc_object's oo_{hp|urgent|locking}_exts. */
	struct list_head	oe_link;
	/** state of this extent */
	enum osc_extent_state	oe_state;
	/** flags for this extent. */
	/** 0 is write, 1 is read */
	unsigned int		oe_rw:1,
	/** sync extent, queued by osc_queue_sync_pages() */
				oe_sync:1,
	/** set if this extent has partial, sync pages.
	 * Extents with partial page(s) can't merge with others in RPC */
				oe_no_merge:1,
				oe_srvlock:1,
				oe_memalloc:1,
	/** an ACTIVE extent is going to be truncated, so when this extent
	 * is released, it will turn into TRUNC state instead of CACHE. */
				oe_trunc_pending:1,
	/** this extent should be written asap and someone may wait for the
	 * write to finish. This bit is usually set along with urgent if
	 * the extent was in CACHE state.
	 * fsync_wait extent can't be merged because new extent region may
	 * exceed fsync range. */
				oe_fsync_wait:1,
	/** covering lock is being canceled */
				oe_hp:1,
	/** this extent should be written back asap. set if one of pages is
	 * called by page WB daemon, or sync write or reading requests. */
				oe_urgent:1,
	/** Non-delay RPC should be used for this extent. */
				oe_ndelay:1,
	/** direct IO pages */
				oe_dio:1,
	/** this extent consists of pages that are not directly accessible
	 * from the CPU */
				oe_is_rdma_only:1;
	/** how many grants were allocated for this extent; there is no grant
	 * allocated for reading extents and sync write extents. */
	unsigned int		oe_grants;
	/** # of dirty pages in this extent */
	unsigned int		oe_nr_pages;
	/** list of pending oap pages. Pages in this list are NOT sorted. */
	struct list_head	oe_pages;
	/** start and end index of this extent, including start and end
	 * themselves. Page offset here is the page index of osc_pages.
	 * oe_start is used as the key for the red-black tree. */
	pgoff_t			oe_start;
	pgoff_t			oe_end;
	/** maximum ending index of this extent, this is limited by
	 * max_pages_per_rpc, lock extent and chunk size. */
	pgoff_t			oe_max_end;
	/** waitqueue - for those who want to be notified if this extent's
	 * state has changed. */
	wait_queue_head_t	oe_waitq;
	/** lock covering this extent */
	struct ldlm_lock	*oe_dlmlock;
	/** terminator of this extent. Must be true if this extent is in IO. */
	struct task_struct	*oe_owner;
	/** return value of writeback. If somebody is waiting for this extent,
	 * this value can be known by outside world. */
	int			oe_rc;
	/** max pages per rpc when this extent was created */
	unsigned int		oe_mppr;
	/** FLR: layout version when this osc_extent is published */
	__u32			oe_layout_version;
};

#endif /* LUSTRE_OSC_H */