4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define MAX_STRING_SIZE 128
35 extern int ldlm_srv_namespace_nr;
36 extern int ldlm_cli_namespace_nr;
37 extern struct mutex ldlm_srv_namespace_lock;
38 extern struct list_head ldlm_srv_namespace_list;
39 extern struct mutex ldlm_cli_namespace_lock;
40 extern struct list_head ldlm_cli_active_namespace_list;
41 extern struct list_head ldlm_cli_inactive_namespace_list;
42 extern unsigned int ldlm_cancel_unused_locks_before_replay;
43 extern struct kmem_cache *ldlm_glimpse_work_kmem;
45 static inline int ldlm_namespace_nr_read(enum ldlm_side client)
47 return client == LDLM_NAMESPACE_SERVER ?
48 ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
51 static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
53 if (client == LDLM_NAMESPACE_SERVER)
54 ldlm_srv_namespace_nr++;
56 ldlm_cli_namespace_nr++;
59 static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
61 if (client == LDLM_NAMESPACE_SERVER)
62 ldlm_srv_namespace_nr--;
64 ldlm_cli_namespace_nr--;
67 static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
69 return client == LDLM_NAMESPACE_SERVER ?
70 &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
74 struct list_head *ldlm_namespace_inactive_list(enum ldlm_side client)
76 return client == LDLM_NAMESPACE_SERVER ?
77 &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
80 static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
82 return client == LDLM_NAMESPACE_SERVER ?
83 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
86 /* ns_bref is the number of resources in this namespace */
87 static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
89 return atomic_read(&ns->ns_bref) == 0;
92 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
94 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
96 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
/* Cancel lru flags: select which locks the LRU cancel paths may cancel
 * and how they may block while doing so. */
enum ldlm_lru_flags {
	LDLM_LRU_FLAG_AGED	= 0x01,	/* Cancel aged locks (non LRU resize) */
	LDLM_LRU_FLAG_PASSED	= 0x02,	/* Cancel passed number of locks */
	LDLM_LRU_FLAG_SHRINK	= 0x04,	/* Cancel locks from shrinker */
	LDLM_LRU_FLAG_LRUR	= 0x08,	/* Cancel locks from lru resize */
	LDLM_LRU_FLAG_NO_WAIT	= 0x10,	/* Cancel locks without blocking:
					 * neither send nor wait for RPCs */
	LDLM_LRU_FLAG_CLEANUP	= 0x20,	/* Used when clearing the lru: tells
					 * prepare_lru_list to set the discard
					 * flag on PR extent locks so no time
					 * is wasted saving pages that are
					 * about to be discarded */
};
114 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
115 enum ldlm_cancel_flags cancel_flags,
116 enum ldlm_lru_flags lru_flags);
117 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
118 struct list_head *cancels, int count, int max,
119 enum ldlm_cancel_flags cancel_flags,
120 enum ldlm_lru_flags lru_flags);
121 extern unsigned int ldlm_enqueue_min;
122 /* ldlm_resource.c */
123 extern struct kmem_cache *ldlm_resource_slab;
124 extern struct kmem_cache *ldlm_lock_slab;
125 extern struct kmem_cache *ldlm_inodebits_slab;
126 extern struct kmem_cache *ldlm_interval_tree_slab;
128 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
129 struct ldlm_lock *new);
136 LDLM_WORK_REVOKE_AST,
140 void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
141 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
142 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
143 enum req_location loc, void *data, int size);
145 ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
146 enum ldlm_type type, enum ldlm_mode mode,
147 const struct ldlm_callback_suite *cbs,
148 void *data, __u32 lvb_len, enum lvb_type lvb_type);
149 enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
150 struct ldlm_namespace *,
152 void *cookie, __u64 *flags);
153 void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
154 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
155 void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
156 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
157 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
158 struct list_head *work_list);
159 #ifdef HAVE_SERVER_SUPPORT
160 int ldlm_reprocess_queue(struct ldlm_resource *res, struct list_head *queue,
161 struct list_head *work_list,
162 enum ldlm_process_intention intention,
163 struct ldlm_lock *hint);
164 int ldlm_handle_conflict_lock(struct ldlm_lock *lock, __u64 *flags,
165 struct list_head *rpc_list);
166 void ldlm_discard_bl_list(struct list_head *bl_list);
167 void ldlm_clear_blocking_lock(struct ldlm_lock *lock);
168 void ldlm_clear_blocking_data(struct ldlm_lock *lock);
170 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
171 ldlm_desc_ast_t ast_type);
172 int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
173 int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, ktime_t last_use);
174 #define ldlm_lock_remove_from_lru(lock) \
175 ldlm_lock_remove_from_lru_check(lock, ktime_set(0, 0))
176 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
177 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
178 void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
179 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
180 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
182 int ldlm_export_cancel_blocked_locks(struct obd_export *exp);
183 int ldlm_export_cancel_locks(struct obd_export *exp);
184 void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
187 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
188 struct ldlm_lock *lock);
189 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
190 struct ldlm_lock_desc *ld,
191 struct list_head *cancels, int count,
192 enum ldlm_cancel_flags cancel_flags);
193 int ldlm_bl_thread_wakeup(void);
195 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
196 struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
197 void ldlm_bl_desc2lock(const struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
199 #ifdef HAVE_SERVER_SUPPORT
201 int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
202 enum ldlm_process_intention intention,
203 enum ldlm_error *err, struct list_head *work_list);
205 /* ldlm_inodebits.c */
206 int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *flags,
207 enum ldlm_process_intention intention,
208 enum ldlm_error *err,
209 struct list_head *work_list);
210 int ldlm_reprocess_inodebits_queue(struct ldlm_resource *res,
211 struct list_head *queue,
212 struct list_head *work_list,
213 enum ldlm_process_intention intention,
214 struct ldlm_lock *hint);
216 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
217 enum ldlm_process_intention intention,
218 enum ldlm_error *err, struct list_head *work_list);
220 int ldlm_extent_alloc_lock(struct ldlm_lock *lock);
221 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
222 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
224 int ldlm_inodebits_alloc_lock(struct ldlm_lock *lock);
225 void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head,
226 struct ldlm_lock *lock);
227 void ldlm_inodebits_unlink_lock(struct ldlm_lock *lock);
230 int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
231 enum ldlm_process_intention intention,
232 enum ldlm_error *err, struct list_head *work_list);
233 int ldlm_init_flock_export(struct obd_export *exp);
234 void ldlm_destroy_flock_export(struct obd_export *exp);
237 void l_check_ns_lock(struct ldlm_namespace *ns);
238 void l_check_no_ns_lock(struct ldlm_namespace *ns);
240 extern struct dentry *ldlm_svc_debugfs_dir;
243 struct ptlrpc_service *ldlm_cb_service;
244 struct ptlrpc_service *ldlm_cancel_service;
245 struct ptlrpc_client *ldlm_client;
246 struct ptlrpc_connection *ldlm_server_conn;
247 struct ldlm_bl_pool *ldlm_bl_pool;
250 /* interval tree, for LDLM_EXTENT. */
251 extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
252 extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
253 extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
254 extern void ldlm_interval_free(struct ldlm_interval *node);
/* this function must be called with res lock held */
/*
 * Return the extent of the first lock linked on this interval node's
 * li_group list (the node must have at least one lock attached, per
 * the LASSERT below).
 * NOTE(review): the list_entry() call below is truncated in this
 * extract — its third (member) argument line is missing; restore it
 * from the complete source before building.
 */
static inline struct ldlm_extent *
ldlm_interval_extent(struct ldlm_interval *node)
	struct ldlm_lock *lock;
	LASSERT(!list_empty(&node->li_group));
	lock = list_entry(node->li_group.next, struct ldlm_lock,
	return &lock->l_policy_data.l_extent;
268 void ldlm_exit(void);
/* Verdict an LRU cancellation policy returns for one scanned lock. */
enum ldlm_policy_res {
	LDLM_POLICY_CANCEL_LOCK,	/* lock may be cancelled */
	LDLM_POLICY_KEEP_LOCK,		/* keep the lock, stop scanning */
	LDLM_POLICY_SKIP_LOCK,		/* skip this lock, keep scanning */
};
/*
 * Type-dispatched helpers for the LDLM_POOL_SYSFS_*_SHOW/STORE
 * generators below.  The *_PRINT_* forms expand to a sprintf() into
 * the caller's 'buf' (captured from the use site) and yield the byte
 * count; the *_SET_* forms are statements.
 * Fix: the SET_int/SET_u64 bodies were bare '{ a = b; }' blocks, which
 * misbehave after 'if (...) MACRO(...); else ...' — wrapped in
 * do { } while (0) and all arguments parenthesized.
 */
#define LDLM_POOL_SYSFS_PRINT_int(v)	sprintf(buf, "%d\n", (v))
#define LDLM_POOL_SYSFS_SET_int(a, b)	do { (a) = (b); } while (0)
#define LDLM_POOL_SYSFS_PRINT_u64(v)	sprintf(buf, "%lld\n", (v))
#define LDLM_POOL_SYSFS_SET_u64(a, b)	do { (a) = (b); } while (0)
#define LDLM_POOL_SYSFS_PRINT_atomic(v)	sprintf(buf, "%d\n", atomic_read(&(v)))
#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&(a), (b))
/*
 * sysfs attribute handler generators for struct ldlm_pool fields.
 * LDLM_POOL_SYSFS_READER_SHOW / _WRITER_STORE define var##_show() /
 * var##_store() kobject handlers that read/write pl->pl_<var> under
 * pl->pl_lock; the *_NOLOCK variants access the field without taking
 * the spinlock.  The trailing 'struct __##var##__dummy_*' declarations
 * exist only to swallow the semicolon at the macro use site.
 * NOTE(review): several interior lines of these macros (the 'buf'
 * parameter, opening braces, local declarations such as 'tmp'/'rc',
 * error checks after kstrtoul(), and the final returns) are absent
 * from this extract — compare against the complete source before
 * modifying; the macro lines below are kept verbatim.
 */
#define LDLM_POOL_SYSFS_READER_SHOW(var, type)				\
	static ssize_t var##_show(struct kobject *kobj,			\
				  struct attribute *attr,		\
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
		spin_lock(&pl->pl_lock);				\
		tmp = pl->pl_##var;					\
		spin_unlock(&pl->pl_lock);				\
		return LDLM_POOL_SYSFS_PRINT_##type(tmp);		\
	struct __##var##__dummy_read {;} /* semicolon catcher */

#define LDLM_POOL_SYSFS_WRITER_STORE(var, type)				\
	static ssize_t var##_store(struct kobject *kobj,		\
				   struct attribute *attr,		\
				   const char *buffer,			\
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
		rc = kstrtoul(buffer, 10, &tmp);			\
		spin_lock(&pl->pl_lock);				\
		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		\
		spin_unlock(&pl->pl_lock);				\
	struct __##var##__dummy_write {; } /* semicolon catcher */

#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type)			\
	static ssize_t var##_show(struct kobject *kobj,			\
				  struct attribute *attr,		\
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
		return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var);	\
	struct __##var##__dummy_read {; } /* semicolon catcher */

#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type)			\
	static ssize_t var##_store(struct kobject *kobj,		\
				   struct attribute *attr,		\
				   const char *buffer,			\
		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,\
		rc = kstrtoul(buffer, 10, &tmp);			\
		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		\
	struct __##var##__dummy_write {; } /* semicolon catcher */
/*
 * Register one debugfs variable: copy 'name' into the fixed
 * MAX_STRING_SIZE buffer behind vars->name and add the entry under
 * 'debugfs_entry' via ldebugfs_add_vars().
 * NOTE(review): the function's storage-class/return-type line, body
 * braces, and some interior statements (presumably assignment of
 * 'data'/'ops' into *vars before registration) are missing from this
 * extract — confirm against the complete source.
 */
ldlm_add_var(struct lprocfs_vars *vars, struct dentry *debugfs_entry,
	     const char *name, void *data, const struct file_operations *ops)
	snprintf((char *)vars->name, MAX_STRING_SIZE, "%s", name);
	ldebugfs_add_vars(debugfs_entry, vars, NULL);
/*
 * Check whether the lock is granted or cancelled, taking the resource
 * lock around the lockless helper so the answer is stable.  Returns the
 * value of is_granted_or_cancelled_nolock().
 * Fix: the visible fragment lacked the 'ret' declaration and the
 * return statement; restored.
 */
static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
{
	int ret;

	lock_res_and_lock(lock);
	ret = is_granted_or_cancelled_nolock(lock);
	unlock_res_and_lock(lock);
	return ret;
}
/*
 * Has the blocking AST for this lock finished?  The flag is tested
 * locklessly first; only when it is not yet set is it re-read under
 * the resource lock for a stable answer.
 * NOTE(review): the 'bl_done' declaration/initializer and the final
 * return statement are missing from this extract (presumably
 * 'bool bl_done = true;' and 'return bl_done;') — confirm against the
 * complete source.
 */
static inline bool is_bl_done(struct ldlm_lock *lock)
	if (!ldlm_is_bl_done(lock)) {
		lock_res_and_lock(lock);
		bl_done = ldlm_is_bl_done(lock);
		unlock_res_and_lock(lock);
392 static inline bool is_lock_converted(struct ldlm_lock *lock)
396 lock_res_and_lock(lock);
397 ret = (lock->l_policy_data.l_inodebits.cancel_bits == 0);
398 unlock_res_and_lock(lock);
403 typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
404 union ldlm_policy_data *);
405 typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
406 union ldlm_wire_policy_data *);
407 void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
408 union ldlm_policy_data *lpolicy);
409 void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
410 union ldlm_wire_policy_data *wpolicy);
411 void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
412 union ldlm_policy_data *lpolicy);
413 void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
414 union ldlm_wire_policy_data *wpolicy);
415 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
416 union ldlm_policy_data *lpolicy);
417 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
418 union ldlm_wire_policy_data *wpolicy);
419 void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
420 union ldlm_policy_data *lpolicy);
421 void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
422 union ldlm_wire_policy_data *wpolicy);
425 #ifdef HAVE_SERVER_SUPPORT
426 extern __u64 ldlm_reclaim_threshold;
427 extern __u64 ldlm_lock_limit;
428 extern __u64 ldlm_reclaim_threshold_mb;
429 extern __u64 ldlm_lock_limit_mb;
430 extern struct percpu_counter ldlm_granted_total;
432 int ldlm_reclaim_setup(void);
433 void ldlm_reclaim_cleanup(void);
434 void ldlm_reclaim_add(struct ldlm_lock *lock);
435 void ldlm_reclaim_del(struct ldlm_lock *lock);
436 bool ldlm_reclaim_full(void);
438 static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
439 const struct ldlm_res_id *res1)
441 return memcmp(res0, res1, sizeof(*res0)) == 0;