X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_dlm.h;h=738154b78c2ee171f328495ba4603952d36c2caf;hb=e5abcf83c0575b8a79594c1eb9ea727739d91522;hp=9d6058a0ea9e47ea19ebb64a0401aefd4cb02baf;hpb=ebbdda942b97a7b144774cfe838521a178ce4847;p=fs%2Flustre-release.git diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h index 9d6058a..738154b 100644 --- a/lustre/include/lustre_dlm.h +++ b/lustre/include/lustre_dlm.h @@ -23,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2010, 2016, Intel Corporation. + * Copyright (c) 2010, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -56,11 +56,17 @@ struct obd_ops; struct obd_device; +extern struct kset *ldlm_ns_kset; +extern struct kset *ldlm_svc_kset; + #define OBD_LDLM_DEVICENAME "ldlm" #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus()) -#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */ +#define LDLM_DEFAULT_MAX_ALIVE 3900 /* 3900 seconds ~65 min */ #define LDLM_CTIME_AGE_LIMIT (10) +/* if client lock is unused for that time it can be cancelled if any other + * client shows interest in that lock, e.g. glimpse is occured. */ +#define LDLM_DIRTY_AGE_LIMIT (10) #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024 /** @@ -229,8 +235,8 @@ struct ldlm_pool_ops { * This feature is commonly referred to as lru_resize. */ struct ldlm_pool { - /** Pool proc directory. */ - struct proc_dir_entry *pl_proc_dir; + /** Pool debugfs directory. */ + struct dentry *pl_debugfs_entry; /** Pool name, must be long enough to hold compound proc entry name. */ char pl_name[100]; /** Lock for protecting SLV/CLV updates. */ @@ -251,20 +257,25 @@ struct ldlm_pool { * server_slv * lock_volume_factor. */ atomic_t pl_lock_volume_factor; /** Time when last SLV from server was obtained. */ - time_t pl_recalc_time; + time64_t pl_recalc_time; /** Recalculation period for pool. */ - time_t pl_recalc_period; + time64_t pl_recalc_period; /** Recalculation and shrink operations. */ struct ldlm_pool_ops *pl_ops; /** Number of planned locks for next period. */ int pl_grant_plan; /** Pool statistics. */ struct lprocfs_stats *pl_stats; + + /* sysfs object */ + struct kobject pl_kobj; + struct completion pl_kobj_unregister; }; -typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **, - void *req_cookie, enum ldlm_mode mode, - __u64 flags, void *data); +typedef int (*ldlm_res_policy)(const struct lu_env *env, + struct ldlm_namespace *, + struct ldlm_lock **, void *req_cookie, + enum ldlm_mode mode, __u64 flags, void *data); typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock); @@ -282,15 +293,16 @@ typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock); * of ldlm_[res_]lvbo_[init,update,fill]() functions. 
*/ struct ldlm_valblock_ops { - int (*lvbo_init)(struct ldlm_resource *res); - int (*lvbo_update)(struct ldlm_resource *res, - struct ptlrpc_request *r, - int increase); - int (*lvbo_free)(struct ldlm_resource *res); + int (*lvbo_init)(const struct lu_env *env, struct ldlm_resource *res); + int (*lvbo_update)(const struct lu_env *env, struct ldlm_resource *res, + struct ldlm_lock *lock, struct ptlrpc_request *r, + int increase); + int (*lvbo_free)(struct ldlm_resource *res); /* Return size of lvb data appropriate RPC size can be reserved */ int (*lvbo_size)(struct ldlm_lock *lock); /* Called to fill in lvb data to RPC buffer @buf */ - int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen); + int (*lvbo_fill)(const struct lu_env *env, struct ldlm_lock *lock, + void *buf, int *buflen); }; /** @@ -369,6 +381,9 @@ struct ldlm_namespace { /** Flag indicating if namespace is on client instead of server */ enum ldlm_side ns_client; + /** name of this namespace */ + char *ns_name; + /** Resource hash table for namespace. */ struct cfs_hash *ns_rs_hash; @@ -387,8 +402,8 @@ struct ldlm_namespace { /** Client side original connect flags supported by server. */ __u64 ns_orig_connect_flags; - /* namespace proc dir entry */ - struct proc_dir_entry *ns_proc_dir_entry; + /* namespace debugfs dir entry */ + struct dentry *ns_debugfs_entry; /** * Position in global namespace list linking all namespaces on @@ -409,6 +424,7 @@ struct ldlm_namespace { struct list_head ns_unused_list; /** Number of locks in the LRU list above */ int ns_nr_unused; + struct list_head *ns_last_pos; /** * Maximum number of locks permitted in the LRU. If 0, means locks @@ -416,8 +432,10 @@ struct ldlm_namespace { * controlled by available memory on this client and on server. */ unsigned int ns_max_unused; + /** Maximum allowed age (last used time) for locks in the LRU */ - unsigned int ns_max_age; + ktime_t ns_max_age; + /** * Server only: number of times we evicted clients due to lack of reply * to ASTs. @@ -429,14 +447,20 @@ struct ldlm_namespace { * This allows the client to start caching negative dentries * for a directory and may save an RPC for a later stat. */ - unsigned int ns_ctime_age_limit; - + time64_t ns_ctime_age_limit; + /** + * Number of seconds since the lock was last used. The client may + * cancel the lock limited by this age and flush related data if + * any other client shows interest in it doing glimpse request. + * This allows to cache stat data locally for such files early. + */ + time64_t ns_dirty_age_limit; /** * Used to rate-limit ldlm_namespace_dump calls. * \see ldlm_namespace_dump. Increased by 10 seconds every time * it is called. */ - cfs_time_t ns_next_dump; + time64_t ns_next_dump; /** "policy" function that does actual lock conflict determination */ ldlm_res_policy ns_policy; @@ -474,7 +498,7 @@ struct ldlm_namespace { * The resources in this namespace remember contended state during * \a ns_contention_time, in seconds. */ - unsigned ns_contention_time; + time64_t ns_contention_time; /** * Limit size of contended extent locks, in bytes. @@ -506,6 +530,9 @@ struct ldlm_namespace { * Which bucket should we start with the lock reclaim. 
*/ int ns_reclaim_start; + + struct kobject ns_kobj; /* sysfs object */ + struct completion ns_kobj_unregister; }; /** @@ -578,10 +605,27 @@ struct ldlm_glimpse_work { __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in * glimpse callback request */ + ptlrpc_interpterer_t gl_interpret_reply; + void *gl_interpret_data; +}; + +struct ldlm_cb_set_arg { + struct ptlrpc_request_set *set; + int type; /* LDLM_{CP,BL,GL}_CALLBACK */ + atomic_t restart; + struct list_head *list; + union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */ + ptlrpc_interpterer_t gl_interpret_reply; + void *gl_interpret_data; }; -/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ -#define LDLM_GL_WORK_NOFREE 0x1 +struct ldlm_cb_async_args { + struct ldlm_cb_set_arg *ca_set_arg; + struct ldlm_lock *ca_lock; +}; + +/** The ldlm_glimpse_work was slab allocated & must be freed accordingly.*/ +#define LDLM_GL_WORK_SLAB_ALLOCATED 0x1 /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { @@ -612,6 +656,7 @@ enum ldlm_cancel_flags { LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ LCF_BL_AST = 0x4, /* Cancel LDLM_FL_BL_AST locks in the same RPC */ + LCF_CONVERT = 0x8, /* Try to convert IBITS lock before cancel */ }; struct ldlm_flock { @@ -694,7 +739,7 @@ struct ldlm_lock { struct list_head l_lru; /** * Linkage to resource's lock queues according to current lock state. - * (could be granted, waiting or converting) + * (could be granted or waiting) * Protected by lr_lock in struct ldlm_resource. */ struct list_head l_res_link; @@ -786,16 +831,9 @@ struct ldlm_lock { wait_queue_head_t l_waitq; /** - * Seconds. It will be updated if there is any activity related to - * the lock, e.g. enqueue the lock or send blocking AST. - */ - time64_t l_last_activity; - - /** - * Time last used by e.g. being matched by lock match. - * Jiffies. Should be converted to time if needed. + * Time, in nanoseconds, last used by e.g. being matched by lock match. */ - cfs_time_t l_last_used; + ktime_t l_last_used; /** Originally requested extent for the extent lock. */ struct ldlm_extent l_req_extent; @@ -816,6 +854,19 @@ struct ldlm_lock { /** Private storage for lock user. Opaque to LDLM. */ void *l_ast_data; + union { + /** + * Seconds. It will be updated if there is any activity related to + * the lock at client, e.g. enqueue the lock. For server it is the + * time when blocking ast was sent. + */ + time64_t l_activity; + time64_t l_blast_sent; + }; + + /* separate ost_lvb used mostly by Data-on-MDT for now. + * It is introduced to don't mix with layout lock data. */ + struct ost_lvb l_ost_lvb; /* * Server-side-only members. */ @@ -832,8 +883,7 @@ struct ldlm_lock { * The lists this could be linked into are: * waiting_locks_list (protected by waiting_locks_spinlock), * then if the lock timed out, it is moved to - * expired_lock_thread.elt_expired_locks for further processing. - * Protected by elt_lock. + * expired_lock_list for further processing. */ struct list_head l_pending_chain; @@ -844,7 +894,7 @@ struct ldlm_lock { * under this lock. * \see ost_rw_prolong_locks */ - cfs_time_t l_callback_timeout; + time64_t l_callback_timeout; /** Local PID of process which created this lock. */ __u32 l_pid; @@ -903,6 +953,15 @@ struct ldlm_lock { * which is for server. 
*/ #define l_slc_link l_rk_ast +#define HANDLE_MAP_SIZE ((LMV_MAX_STRIPE_COUNT + 7) >> 3) + +struct lustre_handle_array { + unsigned int ha_count; + /* ha_map is used as bit flag to indicate handle is remote or local */ + char ha_map[HANDLE_MAP_SIZE]; + struct lustre_handle ha_handles[0]; +}; + /** * LDLM resource description. * Basically, resource is a representation for a single object. @@ -934,8 +993,6 @@ struct ldlm_resource { * @{ */ /** List of locks in granted state */ struct list_head lr_granted; - /** List of locks waiting to change their granted mode (converted) */ - struct list_head lr_converting; /** * List of locks that could not be granted due to conflicts and * that are waiting for conflicts to go away */ @@ -953,8 +1010,9 @@ struct ldlm_resource { union { /** * When the resource was considered as contended, - * used only on server side. */ - cfs_time_t lr_contention_time; + * used only on server side. + */ + time64_t lr_contention_time; /** * Associated inode, used only on client side. */ @@ -985,10 +1043,16 @@ static inline bool ldlm_has_layout(struct ldlm_lock *lock) lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT; } +static inline bool ldlm_has_dom(struct ldlm_lock *lock) +{ + return lock->l_resource->lr_type == LDLM_IBITS && + lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_DOM; +} + static inline char * ldlm_ns_name(struct ldlm_namespace *ns) { - return ns->ns_rs_hash->hs_name; + return ns->ns_name; } static inline struct ldlm_namespace * @@ -1015,7 +1079,8 @@ ldlm_lock_to_ns_at(struct ldlm_lock *lock) return &lock->l_resource->lr_ns_bucket->nsb_at_estimate; } -static inline int ldlm_lvbo_init(struct ldlm_resource *res) +static inline int ldlm_lvbo_init(const struct lu_env *env, + struct ldlm_resource *res) { struct ldlm_namespace *ns = ldlm_res_to_ns(res); int rc = 0; @@ -1030,7 +1095,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res) mutex_unlock(&res->lr_lvb_mutex); return 0; } - rc = ns->ns_lvbo->lvbo_init(res); + rc = ns->ns_lvbo->lvbo_init(env, res); if (rc < 0) { CDEBUG(D_DLMTRACE, "lvbo_init failed for resource : rc = %d\n", rc); @@ -1056,7 +1121,8 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock) return 0; } -static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) +static inline int ldlm_lvbo_fill(const struct lu_env *env, + struct ldlm_lock *lock, void *buf, int *len) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); int rc; @@ -1064,13 +1130,13 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) if (ns->ns_lvbo != NULL) { LASSERT(ns->ns_lvbo->lvbo_fill != NULL); /* init lvb now if not already */ - rc = ldlm_lvbo_init(lock->l_resource); + rc = ldlm_lvbo_init(env, lock->l_resource); if (rc < 0) { CERROR("lock %p: delayed lvb init failed (rc %d)", lock, rc); return rc; } - return ns->ns_lvbo->lvbo_fill(lock, buf, len); + return ns->ns_lvbo->lvbo_fill(env, lock, buf, len); } return 0; } @@ -1097,8 +1163,8 @@ struct ldlm_enqueue_info { void *ei_cb_gl; /** lock glimpse callback */ void *ei_cbdata; /** Data to be passed into callbacks. */ void *ei_namespace; /** lock namespace **/ - unsigned int ei_enq_slave:1, /** whether enqueue slave stripes */ - ei_nonblock:1; /** non block enqueue */ + u64 ei_inodebits; /** lock inode bits **/ + unsigned int ei_enq_slave:1; /** whether enqueue slave stripes */ }; #define ei_res_id ei_cb_gl @@ -1164,8 +1230,41 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, # define LDLM_ERROR(lock, fmt, a...) 
((void)0) #endif +/* + * Three intentions can be used for the policy functions in + * ldlm_processing_policy. + * + * LDLM_PROCESS_RESCAN: + * + * It's used when policy functions are called from ldlm_reprocess_queue() to + * reprocess the wait list and try to grant locks, blocking ASTs + * have already been sent in this situation, completion ASTs need be sent for + * the locks being granted. + * + * LDLM_PROCESS_ENQUEUE: + * + * It's used when policy functions are called from ldlm_lock_enqueue() to + * process the wait list for handling an enqueue request, blocking + * ASTs have not been sent yet, so list of conflicting locks would be + * collected and ASTs sent. + * + * LDLM_PROCESS_RECOVERY: + * + * It's used when policy functions are called from ldlm_reprocess_queue() to + * reprocess the wait list when recovery done. In case of blocking + * ASTs are lost before recovery, it needs not only to grant locks if + * available, but also send blocking ASTs to the locks doesn't have AST sent + * flag. Completion ASTs need be sent for the locks being granted. + */ +enum ldlm_process_intention { + LDLM_PROCESS_RESCAN = 0, + LDLM_PROCESS_ENQUEUE = 1, + LDLM_PROCESS_RECOVERY = 2, +}; + typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, enum ldlm_error *err, + enum ldlm_process_intention intention, + enum ldlm_error *err, struct list_head *work_list); /** @@ -1204,7 +1303,7 @@ struct ldlm_prolong_args { struct ldlm_res_id lpa_resid; struct ldlm_extent lpa_extent; enum ldlm_mode lpa_mode; - int lpa_timeout; + time64_t lpa_timeout; int lpa_locks_cnt; int lpa_blocks_cnt; }; @@ -1238,14 +1337,11 @@ int ldlm_glimpse_locks(struct ldlm_resource *res, * MDT or OST to pass through LDLM requests to LDLM for handling * @{ */ -int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback, - ldlm_blocking_callback, ldlm_glimpse_callback); int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req, - const struct ldlm_request *dlm_req, - const struct ldlm_callback_suite *cbs); -int ldlm_handle_convert(struct ptlrpc_request *req); + const struct ldlm_request *dlm_req, + const struct ldlm_callback_suite *cbs); int ldlm_handle_convert0(struct ptlrpc_request *req, - const struct ldlm_request *dlm_req); + const struct ldlm_request *dlm_req); int ldlm_handle_cancel(struct ptlrpc_request *req); int ldlm_request_cancel(struct ptlrpc_request *req, const struct ldlm_request *dlm_req, @@ -1253,10 +1349,10 @@ int ldlm_request_cancel(struct ptlrpc_request *req, /** @} ldlm_handlers */ void ldlm_revoke_export_locks(struct obd_export *exp); -unsigned int ldlm_bl_timeout(struct ldlm_lock *lock); +time64_t ldlm_bl_timeout(struct ldlm_lock *lock); #endif int ldlm_del_waiting_lock(struct ldlm_lock *lock); -int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout); +int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout); int ldlm_get_ref(void); void ldlm_put_ref(void); int ldlm_init_export(struct obd_export *exp); @@ -1301,26 +1397,35 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) * Update Lock Value Block Operations (LVBO) on a resource taking into account * data from request \a r */ -static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, - struct ptlrpc_request *req, int increase) +static inline int ldlm_lvbo_update(const struct lu_env *env, + struct ldlm_resource *res, + struct ldlm_lock *lock, + struct ptlrpc_request *req, int increase) { + struct ldlm_namespace *ns = ldlm_res_to_ns(res); int 
rc; /* delayed lvb init may be required */ - rc = ldlm_lvbo_init(res); + rc = ldlm_lvbo_init(env, res); if (rc < 0) { CERROR("delayed lvb init failed (rc %d)\n", rc); return rc; } - if (ldlm_res_to_ns(res)->ns_lvbo && - ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) { - return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, req, - increase); - } + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_update) + return ns->ns_lvbo->lvbo_update(env, res, lock, req, increase); + return 0; } +static inline int ldlm_res_lvbo_update(const struct lu_env *env, + struct ldlm_resource *res, + struct ptlrpc_request *req, + int increase) +{ + return ldlm_lvbo_update(env, res, NULL, req, increase); +} + int ldlm_error2errno(enum ldlm_error error); enum ldlm_error ldlm_errno2error(int err_no); /* don't call it `errno': this * confuses user-space. */ @@ -1389,12 +1494,10 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, struct lustre_handle *, int unref); enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh, __u64 *bits); -struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, - enum ldlm_mode new_mode, __u32 *flags); -void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode); +void ldlm_lock_mode_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode); void ldlm_lock_cancel(struct ldlm_lock *lock); void ldlm_reprocess_all(struct ldlm_resource *res); -void ldlm_reprocess_all_ns(struct ldlm_namespace *ns); +void ldlm_reprocess_recovery_done(struct ldlm_namespace *ns); void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh); void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); @@ -1415,12 +1518,40 @@ void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client); void ldlm_namespace_get(struct ldlm_namespace *ns); void ldlm_namespace_put(struct ldlm_namespace *ns); -int ldlm_proc_setup(void); -#ifdef CONFIG_PROC_FS -void ldlm_proc_cleanup(void); -#else -static inline void ldlm_proc_cleanup(void) {} -#endif + +int ldlm_debugfs_setup(void); +void ldlm_debugfs_cleanup(void); + +static inline void ldlm_svc_get_eopc(const struct ldlm_request *dlm_req, + struct lprocfs_stats *srv_stats) +{ + int lock_type = 0, op = 0; + + lock_type = dlm_req->lock_desc.l_resource.lr_type; + + switch (lock_type) { + case LDLM_PLAIN: + op = PTLRPC_LAST_CNTR + LDLM_PLAIN_ENQUEUE; + break; + case LDLM_EXTENT: + op = PTLRPC_LAST_CNTR + LDLM_EXTENT_ENQUEUE; + break; + case LDLM_FLOCK: + op = PTLRPC_LAST_CNTR + LDLM_FLOCK_ENQUEUE; + break; + case LDLM_IBITS: + op = PTLRPC_LAST_CNTR + LDLM_IBITS_ENQUEUE; + break; + default: + op = 0; + break; + } + + if (op != 0) + lprocfs_counter_incr(srv_stats, op); + + return; +} /* resource.c - internal */ struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, @@ -1490,7 +1621,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, const struct lustre_handle *lockh, int rc); -int ldlm_cli_enqueue_local(struct ldlm_namespace *ns, +int ldlm_cli_enqueue_local(const struct lu_env *env, + struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, enum ldlm_type type, union ldlm_policy_data *policy, enum ldlm_mode mode, __u64 *flags, @@ -1500,8 +1632,7 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns, void *data, __u32 lvb_len, enum lvb_type lvb_type, const __u64 *client_cookie, struct lustre_handle *lockh); -int ldlm_cli_convert(const struct lustre_handle *lockh, int new_mode, - __u32 *flags); +int 
ldlm_cli_convert(struct ldlm_lock *lock, __u32 *flags); int ldlm_cli_update_pool(struct ptlrpc_request *req); int ldlm_cli_cancel(const struct lustre_handle *lockh, enum ldlm_cancel_flags cancel_flags); @@ -1525,6 +1656,11 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, int ldlm_cli_cancel_list(struct list_head *head, int count, struct ptlrpc_request *req, enum ldlm_cancel_flags flags); + +int ldlm_inodebits_drop(struct ldlm_lock *lock, __u64 to_drop); +int ldlm_cli_dropbits(struct ldlm_lock *lock, __u64 drop_bits); +int ldlm_cli_dropbits_list(struct list_head *converts, __u64 drop_bits); + /** @} ldlm_cli_api */ /* mds/handler.c */ @@ -1574,7 +1710,6 @@ void unlock_res_and_lock(struct ldlm_lock *lock); * There are not used outside of ldlm. * @{ */ -int ldlm_pools_recalc(enum ldlm_side client); int ldlm_pools_init(void); void ldlm_pools_fini(void); @@ -1583,7 +1718,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns, int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask); void ldlm_pool_fini(struct ldlm_pool *pl); int ldlm_pool_setup(struct ldlm_pool *pl, int limit); -int ldlm_pool_recalc(struct ldlm_pool *pl); +time64_t ldlm_pool_recalc(struct ldlm_pool *pl); __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl); __u64 ldlm_pool_get_slv(struct ldlm_pool *pl); __u64 ldlm_pool_get_clv(struct ldlm_pool *pl); @@ -1608,5 +1743,7 @@ static inline int ldlm_extent_contain(const struct ldlm_extent *ex1, return ex1->start <= ex2->start && ex1->end >= ex2->end; } +int ldlm_inodebits_drop(struct ldlm_lock *lock, __u64 to_drop); + #endif /** @} LDLM */
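Note: the updated lvbo_fill() prototype passes the buffer length as int *buflen rather than by value, which points at an in/out convention: the caller supplies the available reply space and the callback reports how much of it was actually used. A minimal standalone sketch of that convention follows; the demo_* names are hypothetical stand-ins, not the real Lustre callbacks.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for an LVB fill callback: *buflen carries the
 * available space in and the number of bytes actually written out. */
static int demo_lvbo_fill(const void *lvb, int lvb_len, void *buf, int *buflen)
{
	if (*buflen < lvb_len)
		return -1;		/* reply buffer too small */
	memcpy(buf, lvb, lvb_len);
	*buflen = lvb_len;		/* report the size actually used */
	return lvb_len;
}

int main(void)
{
	char lvb[16] = "lvb-data";
	char reply[64];
	int len = sizeof(reply);

	if (demo_lvbo_fill(lvb, sizeof(lvb), reply, &len) > 0)
		printf("packed %d bytes into the reply\n", len);
	return 0;
}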
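Note: l_last_used changes from a jiffies-based cfs_time_t to a nanosecond ktime_t, and ns_max_age follows, so LRU age checks become plain 64-bit nanosecond arithmetic. A standalone illustration under that assumption, using a stand-in 64-bit type instead of the kernel's ktime helpers:

#include <stdint.h>
#include <stdbool.h>

#define DEMO_NSEC_PER_SEC 1000000000LL

typedef int64_t demo_ktime_t;	/* stand-in for ktime_t (nanoseconds) */

/* Is the lock older than the namespace's maximum LRU age? */
static bool demo_lock_expired(demo_ktime_t now, demo_ktime_t last_used,
			      demo_ktime_t max_age)
{
	return now - last_used > max_age;
}

int main(void)
{
	demo_ktime_t max_age = 65LL * 60 * DEMO_NSEC_PER_SEC; /* ~65 minutes */
	demo_ktime_t last_used = 0;
	demo_ktime_t now = 66LL * 60 * DEMO_NSEC_PER_SEC;

	return demo_lock_expired(now, last_used, max_age) ? 0 : 1;
}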
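Note: struct lustre_handle_array reserves one bit per handle in ha_map (HANDLE_MAP_SIZE is (LMV_MAX_STRIPE_COUNT + 7) >> 3) to record whether each handle is remote or local. A self-contained sketch of that bit-map layout; DEMO_MAX_HANDLES stands in for LMV_MAX_STRIPE_COUNT and the helpers are hypothetical:

#include <stdbool.h>
#include <string.h>

#define DEMO_MAX_HANDLES 256
#define DEMO_MAP_SIZE    ((DEMO_MAX_HANDLES + 7) >> 3)

struct demo_handle_array {
	unsigned int ha_count;
	char         ha_map[DEMO_MAP_SIZE]; /* bit i set => handle i is remote */
};

static void demo_set_remote(struct demo_handle_array *ha, unsigned int i)
{
	ha->ha_map[i >> 3] |= 1 << (i & 7);
}

static bool demo_is_remote(const struct demo_handle_array *ha, unsigned int i)
{
	return (ha->ha_map[i >> 3] & (1 << (i & 7))) != 0;
}

int main(void)
{
	struct demo_handle_array ha;

	memset(&ha, 0, sizeof(ha));
	ha.ha_count = 3;
	demo_set_remote(&ha, 2);
	return demo_is_remote(&ha, 2) ? 0 : 1;
}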
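Note: the comment block above enum ldlm_process_intention describes which ASTs remain outstanding in each of the three cases. A compact standalone sketch of a policy deciding that, with illustrative demo_* names rather than the real ldlm_processing_policy machinery:

#include <stdbool.h>

enum demo_process_intention {
	DEMO_PROCESS_RESCAN   = 0,	/* reprocess waiters, BL ASTs already sent */
	DEMO_PROCESS_ENQUEUE  = 1,	/* new enqueue: collect conflicts, send BL ASTs */
	DEMO_PROCESS_RECOVERY = 2,	/* after recovery: BL ASTs may need resending */
};

enum demo_ast_work {
	DEMO_SEND_NONE   = 0,
	DEMO_SEND_BL_AST = 1,		/* notify conflicting holders */
	DEMO_SEND_CP_AST = 2,		/* notify the newly granted waiter */
};

/* Decide which ASTs still need to be sent for one waiting lock. */
static int demo_ast_work_for(enum demo_process_intention intention,
			     bool holder_bl_ast_sent)
{
	switch (intention) {
	case DEMO_PROCESS_RESCAN:
		/* Blocking ASTs went out earlier; only completion ASTs for
		 * locks granted now are outstanding. */
		return DEMO_SEND_CP_AST;
	case DEMO_PROCESS_ENQUEUE:
		/* Fresh enqueue: conflicting locks are collected and blocking
		 * ASTs sent from here. */
		return DEMO_SEND_BL_AST;
	case DEMO_PROCESS_RECOVERY:
		/* Grant if possible, and also (re)send blocking ASTs to any
		 * holder that does not carry the AST-sent flag. */
		return DEMO_SEND_CP_AST |
		       (holder_bl_ast_sent ? 0 : DEMO_SEND_BL_AST);
	}
	return DEMO_SEND_NONE;
}

int main(void)
{
	/* After recovery, a holder whose blocking AST was lost must be
	 * notified again in addition to granting the waiter. */
	return demo_ast_work_for(DEMO_PROCESS_RECOVERY, false) ==
	       (DEMO_SEND_CP_AST | DEMO_SEND_BL_AST) ? 0 : 1;
}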
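Note: the new LCF_CONVERT flag and the ldlm_inodebits_drop()/ldlm_cli_dropbits() declarations suggest converting an IBITS lock by clearing some of its bits instead of cancelling it outright. A trivial standalone illustration of just that bit arithmetic; the bit names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INODELOCK_LOOKUP 0x01ULL
#define DEMO_INODELOCK_UPDATE 0x02ULL
#define DEMO_INODELOCK_LAYOUT 0x08ULL

/* Keep the lock but clear the requested bits; a full cancel would only be
 * needed if nothing were left. */
static uint64_t demo_drop_bits(uint64_t held, uint64_t to_drop)
{
	return held & ~to_drop;
}

int main(void)
{
	uint64_t held = DEMO_INODELOCK_LOOKUP | DEMO_INODELOCK_UPDATE |
			DEMO_INODELOCK_LAYOUT;
	uint64_t left = demo_drop_bits(held, DEMO_INODELOCK_LAYOUT);

	printf("remaining bits: %#llx\n", (unsigned long long)left);
	return left != 0 ? 0 : 1;
}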