X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_dlm.h;h=bd4d87df29816680fcb2f04b58040ac6fa0a0e06;hb=f0d608786a27dfb8dddf06d6b086b491749557f1;hp=c8f321dbe30c528cfefad6bcba0044e3522297f1;hpb=f2a9374170e4522b9d2ac3b7096cf2912339d480;p=fs%2Flustre-release.git

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index c8f321d..bd4d87d 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -75,6 +75,7 @@ struct obd_device;
 #endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
+#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
 
 typedef enum {
         ELDLM_OK = 0,
@@ -121,14 +122,15 @@
 #define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
 #define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
 #define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
-/* was LDLM_FL_WARN until 2.0.0 0x008000 */
 #define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
 
 #define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                          * indefinitely */
 
 /* file & record locking */
-#define LDLM_FL_BLOCK_NOWAIT   0x040000 // server told not to wait if blocked
+#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked.
+                                         * For AGL, OST will not send glimpse
+                                         * callback. */
 #define LDLM_FL_TEST_LOCK      0x080000 // return blocking lock
 
 /* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
@@ -152,6 +154,10 @@ typedef enum {
  * list. */
 #define LDLM_FL_KMS_IGNORE     0x200000
 
+/* Don't put lock into the LRU list, so that it is not canceled due to aging.
+ * Used by MGC locks, they are cancelled only at unmount or by callback. */
+#define LDLM_FL_NO_LRU         0x400000
+
 /* Immediatelly cancel such locks when they block some other locks. Send
  * cancel notification to original lock holder, but expect no reply. This is
  * for clients (like liblustre) that cannot be expected to reliably response
@@ -171,8 +177,6 @@
  * w/o involving separate thread. in order to decrease cs rate */
 #define LDLM_FL_ATOMIC_CB      0x4000000
 
-/* was LDLM_FL_ASYNC until 2.0.0 0x8000000 */
-
 /* It may happen that a client initiate 2 operations, e.g. unlink and mkdir,
  * such that server send blocking ast for conflict locks to this client for
  * the 1st operation, whereas the 2nd operation has canceled this lock and
@@ -508,12 +512,20 @@ struct ldlm_namespace {
          * Limit size of nolock requests, in bytes.
          */
         unsigned               ns_max_nolock_size;
+
+        /**
+         * Limit of parallel AST RPC count.
+         */
+        unsigned               ns_max_parallel_ast;
+
         /* callback to cancel locks before replaying it during recovery */
         ldlm_cancel_for_recovery ns_cancel_for_recovery;
         /**
          * ldlm lock stats
          */
         struct lprocfs_stats  *ns_stats;
+
+        unsigned               ns_stopping:1;   /* namespace cleanup */
 };
 
 static inline int ns_is_client(struct ldlm_namespace *ns)
@@ -687,18 +699,6 @@ struct ldlm_lock {
         __u64                 l_flags;
         __u32                 l_readers;
         __u32                 l_writers;
-        /*
-         * Set for locks that were removed from class hash table and will be
-         * destroyed when last reference to them is released. Set by
-         * ldlm_lock_destroy_internal().
-         *
-         * Protected by lock and resource locks.
-         */
-        __u8                  l_destroyed;
-        /**
-         * flag whether this is a server namespace lock
-         */
-        __u8                  l_ns_srv;
         /**
          * If the lock is granted, a process sleeps on this waitq to learn when
          * it's no longer in use.  If the lock is not granted, a process sleeps
@@ -719,6 +719,20 @@ struct ldlm_lock {
 
         struct ldlm_extent    l_req_extent;
 
+        unsigned int          l_failed:1,
+        /*
+         * Set for locks that were removed from class hash table and will be
+         * destroyed when last reference to them is released. Set by
+         * ldlm_lock_destroy_internal().
+         *
+         * Protected by lock and resource locks.
+         */
+                              l_destroyed:1,
+        /**
+         * flag whether this is a server namespace lock.
+         */
+                              l_ns_srv:1;
+
         /*
          * Client-side-only members.
          */
@@ -750,6 +764,7 @@
          */
         __u32                 l_pid;
 
+        int                   l_bl_ast_run;
         /**
          * For ldlm_add_ast_work_item().
          */
@@ -764,7 +779,6 @@
         cfs_list_t            l_rk_ast;
 
         struct ldlm_lock     *l_blocking_lock;
-        int                   l_bl_ast_run;
 
         /**
          * Protected by lr_lock, linkages to "skip lists".
@@ -782,6 +796,11 @@
         /** referenced export object */
         struct obd_export    *l_exp_refs_target;
 #endif
+        /** export blocking dlm lock list, protected by
+         * l_export->exp_bl_list_lock.
+         * Lock order of waiting_lists_spinlock, exp_bl_list_lock and res lock
+         * is: res lock -> exp_bl_list_lock -> wanting_lists_spinlock. */
+        cfs_list_t            l_exp_list;
 };
 
 struct ldlm_resource {
@@ -868,7 +887,6 @@ struct ldlm_enqueue_info {
         void *ei_cb_gl;  /* lock glimpse callback */
         void *ei_cb_wg;  /* lock weigh callback */
         void *ei_cbdata; /* Data to be passed into callbacks. */
-        short ei_async:1; /* async request */
 };
 
 extern struct obd_ops ldlm_obd_ops;
@@ -978,6 +996,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
 void ldlm_cancel_callback(struct ldlm_lock *);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
+int ldlm_lock_set_data(struct lustre_handle *, void *);
 
 static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
 {
@@ -1065,12 +1084,16 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
 int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
+void ldlm_lock_fail_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                             const struct ldlm_res_id *, ldlm_type_t type,
                             ldlm_policy_data_t *, ldlm_mode_t mode,
                             struct lustre_handle *, int unref);
+ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+                                        __u64 *bits);
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                         __u32 *flags);
 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
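For context, a minimal client-side sketch (not part of the patch) of how two of the additions above could be used together: the LDLM_FL_NO_LRU flag and the newly exported ldlm_lock_set_data() prototype. The function name example_pin_config_lock and the cfg_data pointer are hypothetical; the flag is simply set on an already-granted lock here purely for illustration.

#include <lustre_dlm.h>

/* Illustrative sketch only: keep an already-granted lock (e.g. an MGC
 * configuration lock) out of the LRU so it is never cancelled by aging,
 * and attach caller-private data to it.  "cfg_data" is a hypothetical
 * pointer owned by the caller. */
static int example_pin_config_lock(struct lustre_handle *lockh, void *cfg_data)
{
        struct ldlm_lock *lock;
        int rc;

        lock = ldlm_handle2lock(lockh);           /* takes a lock reference */
        if (lock == NULL)
                return -ENOENT;

        lock_res_and_lock(lock);
        lock->l_flags |= LDLM_FL_NO_LRU;          /* never moved to the LRU */
        unlock_res_and_lock(lock);

        rc = ldlm_lock_set_data(lockh, cfg_data); /* prototype added above */

        LDLM_LOCK_PUT(lock);                      /* drop ldlm_handle2lock() ref */
        return rc;
}

Modifying l_flags under lock_res_and_lock() follows the rule stated in the struct comments above that the lock flags are protected by the lock and resource locks.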