/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** \defgroup LDLM Lustre Distributed Lock Manager
 *
 * Lustre DLM is based on the VAX DLM.
 * Its two main roles are:
 * - To provide locking that ensures consistency of data on all Lustre nodes.
 * - To allow clients to cache state protected by a lock by holding the
 *   lock until a conflicting lock is requested or it is expired by the LRU.
 *
 * @{
 */
#ifndef _LUSTRE_DLM_H__
#define _LUSTRE_DLM_H__

#if defined(__linux__)
#include <linux/lustre_dlm.h>
#elif defined(__APPLE__)
#include <darwin/lustre_dlm.h>
#elif defined(__WINNT__)
#include <winnt/lustre_dlm.h>
#else
#error Unsupported operating system.
#endif
#include <lustre_lib.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_handles.h>
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */

#include "lustre_dlm_flags.h"

#define OBD_LDLM_DEVICENAME  "ldlm"

#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
#define LDLM_CTIME_AGE_LIMIT (10)
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
/**
 * LDLM non-error return states
 */
typedef enum {
        ELDLM_OK = 0,

        ELDLM_LOCK_CHANGED = 300,
        ELDLM_LOCK_ABORTED = 301,
        ELDLM_LOCK_REPLACED = 302,
        ELDLM_NO_LOCK_DATA = 303,
        ELDLM_LOCK_WOULDBLOCK = 304,

        ELDLM_NAMESPACE_EXISTS = 400,
        ELDLM_BAD_NAMESPACE = 401
} ldlm_error_t;
/**
 * LDLM namespace type.
 * The "client" type is actually an indication that this is a narrow local view
 * into the complete namespace on the server. Such namespaces cannot make any
 * decisions about lack of conflicts or do any autonomous lock granting without
 * first speaking to a server.
 */
typedef enum {
        LDLM_NAMESPACE_SERVER = 1 << 0,
        LDLM_NAMESPACE_CLIENT = 1 << 1
} ldlm_side_t;
/**
 * The blocking callback is overloaded to perform two functions. These flags
 * indicate which operation should be performed.
 */
#define LDLM_CB_BLOCKING 1
#define LDLM_CB_CANCELING 2
/**
 * \name Lock Compatibility Matrix.
 *
 * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
 * Lock types are described in their respective implementation files:
 * ldlm_{extent,flock,inodebits,plain}.c.
 *
 * There are six lock modes along with a compatibility matrix to indicate if
 * two locks are compatible.
 *
 * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
 *   on the parent.
 * - PW: Protective Write (normal write) mode. When a client requests a write
 *   lock from an OST, a lock with PW mode will be issued.
 * - PR: Protective Read (normal read) mode. When a client requests a read from
 *   an OST, a lock with PR mode will be issued. Also, if the client opens a
 *   file for execution, it is granted a lock with PR mode.
 * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
 *   requests a write lock during a file open operation.
 * - CR: Concurrent Read mode. When a client performs a path lookup, MDS grants
 *   an inodebit lock with the CR mode on the intermediate path component.
 *
 * The compatibility matrix follows from the LCK_COMPAT_* definitions below:
 * NL is compatible with everything, CR with everything except EX, and so on.
 */
#define LCK_COMPAT_EX     LCK_NL
#define LCK_COMPAT_PW     (LCK_COMPAT_EX | LCK_CR)
#define LCK_COMPAT_PR     (LCK_COMPAT_PW | LCK_PR)
#define LCK_COMPAT_CW     (LCK_COMPAT_PW | LCK_CW)
#define LCK_COMPAT_CR     (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL     (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)
#define LCK_COMPAT_COS    (LCK_COS)
/** @} Lock Compatibility Matrix */
extern ldlm_mode_t lck_compat_array[];

static inline void lockmode_verify(ldlm_mode_t mode)
{
        LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}

static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
{
        return (lck_compat_array[exist_mode] & new_mode);
}
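/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * a granted lock's mode is looked up in lck_compat_array and tested against
 * the bit of the requested mode.
 *
 *	lockmode_verify(new_mode);
 *	if (lockmode_compat(LCK_PR, new_mode)) {
 *		// e.g. new_mode == LCK_CR: both read modes may be granted
 *	} else {
 *		// e.g. new_mode == LCK_PW: conflict, send a blocking AST
 *	}
 */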
/* cluster name spaces */

#define DLM_OST_NAMESPACE 1
#define DLM_MDS_NAMESPACE 2

/* XXX
   - do we just separate this by security domains and use a prefix for
     multiple namespaces in the same domain?
 */
/**
 * Locking rules for LDLM:
 *
 * lr_lock
 *     waiting_locks_spinlock
 */
struct ldlm_resource;
struct ldlm_namespace;
/**
 * Operations on LDLM pools.
 * LDLM pool is a pool of locks in the namespace without any implicitly
 * specified limits.
 * Locks in the pool are organized in LRU.
 * Local memory pressure or server instructions (e.g. mempressure on server)
 * can trigger freeing of locks from the pool.
 */
struct ldlm_pool_ops {
        /** Recalculate pool \a pl usage */
        int (*po_recalc)(struct ldlm_pool *pl);
        /** Cancel at least \a nr locks from pool \a pl */
        int (*po_shrink)(struct ldlm_pool *pl, int nr,
                         unsigned int gfp_mask);
        int (*po_setup)(struct ldlm_pool *pl, int limit);
};
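/*
 * Sketch (an illustrative addition; the my_pool_* names are hypothetical):
 * a minimal ldlm_pool_ops instance. The real implementations live in
 * ldlm_pool.c and differ between client and server side.
 *
 *	static int my_pool_recalc(struct ldlm_pool *pl)
 *	{
 *		return 0;	// recompute SLV / grant plan here
 *	}
 *
 *	static int my_pool_shrink(struct ldlm_pool *pl, int nr,
 *				  unsigned int gfp_mask)
 *	{
 *		return 0;	// cancel up to nr unused locks, return count
 *	}
 *
 *	static struct ldlm_pool_ops my_pool_ops = {
 *		.po_recalc = my_pool_recalc,
 *		.po_shrink = my_pool_shrink,
 *	};
 */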
/** One second for pools thread check interval. Each pool has its own period. */
#define LDLM_POOLS_THREAD_PERIOD (1)

/** ~6% margin for modest pools. See ldlm_pool.c for details. */
#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)

/** Default recalc period for server side pools in sec. */
#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)

/** Default recalc period for client side pools in sec. */
#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
/**
 * LDLM pool structure to track granted locks.
 * For purposes of determining when to release locks on e.g. memory pressure.
 * This feature is commonly referred to as lru_resize.
 */
struct ldlm_pool {
        /** Pool proc directory. */
        struct proc_dir_entry   *pl_proc_dir;
        /** Pool name, must be long enough to hold compound proc entry name. */
        char                     pl_name[100];
        /** Lock for protecting SLV/CLV updates. */
        spinlock_t               pl_lock;
        /** Number of allowed locks in the pool, both client and server side. */
        cfs_atomic_t             pl_limit;
        /** Number of granted locks in the pool. */
        cfs_atomic_t             pl_granted;
        /** Grant rate per T (the recalculation period). */
        cfs_atomic_t             pl_grant_rate;
        /** Cancel rate per T. */
        cfs_atomic_t             pl_cancel_rate;
        /** Server lock volume (SLV). Protected by pl_lock. */
        __u64                    pl_server_lock_volume;
        /** Current biggest client lock volume. Protected by pl_lock. */
        __u64                    pl_client_lock_volume;
        /** Lock volume factor. SLV on client is calculated as follows:
         *  server_slv * lock_volume_factor. */
        cfs_atomic_t             pl_lock_volume_factor;
        /** Time when last SLV from server was obtained. */
        time_t                   pl_recalc_time;
        /** Recalculation period for pool. */
        time_t                   pl_recalc_period;
        /** Recalculation and shrink operations. */
        struct ldlm_pool_ops    *pl_ops;
        /** Number of planned locks for next period. */
        int                      pl_grant_plan;
        /** Pool statistics. */
        struct lprocfs_stats    *pl_stats;
};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                               void *req_cookie, ldlm_mode_t mode, __u64 flags,
                               void *data);

typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
/**
 * LVB operations.
 * LVB is Lock Value Block. This is a special opaque (to LDLM) value that could
 * be associated with an LDLM lock and transferred from client to server and
 * back.
 *
 * Currently LVBs are used by:
 * - OSC-OST code to maintain current object size/times
 * - layout lock code to return the layout when the layout lock is granted
 */
struct ldlm_valblock_ops {
        int (*lvbo_init)(struct ldlm_resource *res);
        int (*lvbo_update)(struct ldlm_resource *res,
                           struct ptlrpc_request *r,
                           int increase);
        int (*lvbo_free)(struct ldlm_resource *res);
        /* Return size of the LVB data so that a big enough RPC buffer
         * can be reserved */
        int (*lvbo_size)(struct ldlm_lock *lock);
        /* Called to fill in LVB data to RPC buffer @buf */
        int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
};
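/*
 * Sketch (an illustrative addition; the my_lvbo_* handlers are hypothetical):
 * how a service might hook LVB handling into its namespace. lvbo_size is
 * consulted first so the reply buffer can be sized, then lvbo_fill copies
 * the LVB into that buffer.
 *
 *	static struct ldlm_valblock_ops my_lvb_ops = {
 *		.lvbo_init   = my_lvbo_init,
 *		.lvbo_update = my_lvbo_update,
 *		.lvbo_size   = my_lvbo_size,
 *		.lvbo_fill   = my_lvbo_fill,
 *	};
 *
 *	// later, after the namespace has been created:
 *	ns->ns_lvbo = &my_lvb_ops;
 */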
/**
 * LDLM pools related, type of lock pool in the namespace.
 * Greedy means release cached locks aggressively.
 */
typedef enum {
        LDLM_NAMESPACE_GREEDY = 1 << 0,
        LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;
/**
 * Default values for the "max_nolock_size", "contention_time" and
 * "contended_locks" namespace tunables.
 */
#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
#define NS_DEFAULT_CONTENTION_SECONDS 2
#define NS_DEFAULT_CONTENDED_LOCKS 32
struct ldlm_ns_bucket {
        /** back pointer to namespace */
        struct ldlm_namespace      *nsb_namespace;
        /**
         * Estimated lock callback time. Used by adaptive timeout code to
         * avoid spurious client evictions due to unresponsiveness when in
         * fact the network or overall system load is at fault
         */
        struct adaptive_timeout     nsb_at_estimate;
};
enum {
        /** LDLM namespace lock stats */
        LDLM_NSS_LOCKS = 0,
        LDLM_NSS_LAST
};

typedef enum {
        LDLM_NS_TYPE_UNKNOWN = 0,
        LDLM_NS_TYPE_MDC,
        LDLM_NS_TYPE_MDT,
        LDLM_NS_TYPE_OSC,
        LDLM_NS_TYPE_OST,
        LDLM_NS_TYPE_MGC,
        LDLM_NS_TYPE_MGT,
} ldlm_ns_type_t;
/**
 * LDLM Namespace.
 *
 * Namespace serves to contain locks related to a particular service.
 * There are two kinds of namespaces:
 * - Server namespace has knowledge of all locks and is therefore authoritative
 *   to make decisions like what locks could be granted and what conflicts
 *   exist during new lock enqueue.
 * - Client namespace only has limited knowledge about locks in the namespace,
 *   only seeing locks held by the client.
 *
 * Every Lustre service has one server namespace present on the server serving
 * that service. Every client connected to the service has a client namespace
 * for it.
 * Every lock obtained by a client in that namespace is actually represented by
 * two in-memory locks. One on the server and one on the client. The locks are
 * linked by a special cookie by which one node can tell the other which lock
 * it actually means during communications. Such locks are called remote locks.
 * The locks held by a server only, without any reference to a client, are
 * called local locks.
 */
struct ldlm_namespace {
        /** Backward link to OBD, required for LDLM pool to store new SLV. */
        struct obd_device       *ns_obd;

        /** Flag indicating if namespace is on client instead of server */
        ldlm_side_t              ns_client;

        /** Resource hash table for namespace. */
        cfs_hash_t              *ns_rs_hash;

        /** Spinlock protecting e.g. the client LRU list below. */
        spinlock_t               ns_lock;

        /** big refcount (by bucket) */
        cfs_atomic_t             ns_bref;

        /**
         * Namespace connect flags supported by server (may be changed via
         * /proc, LRU resize may be disabled/enabled).
         */
        __u64                    ns_connect_flags;

        /** Client side original connect flags supported by server. */
        __u64                    ns_orig_connect_flags;

        /* namespace proc dir entry */
        struct proc_dir_entry   *ns_proc_dir_entry;

        /**
         * Position in global namespace list linking all namespaces on
         * the node.
         */
        cfs_list_t               ns_list_chain;

        /**
         * List of unused locks for this namespace. This list is also called
         * MRU lock list.
         * Unused locks are locks with zero reader/writer reference counts.
         * This list is only used on clients for lock caching purposes.
         * When we want to release some locks voluntarily or if server wants
         * us to release some locks due to e.g. memory pressure, we take locks
         * to release from the head of this list.
         * Locks are linked via l_lru field in \see struct ldlm_lock.
         */
        cfs_list_t               ns_unused_list;
        /** Number of locks in the LRU list above */
        int                      ns_nr_unused;

        /**
         * Maximum number of locks permitted in the LRU. If 0, means locks
         * are managed by pools and there is no preset limit, rather it is all
         * controlled by available memory on this client and on server.
         */
        unsigned int             ns_max_unused;
        /** Maximum allowed age (last used time) for locks in the LRU */
        unsigned int             ns_max_age;
        /**
         * Server only: number of times we evicted clients due to lack of reply
         * to ASTs.
         */
        unsigned int             ns_timeouts;
        /**
         * Number of seconds since the file change time after which the
         * MDT will return an UPDATE lock along with a LOOKUP lock.
         * This allows the client to start caching negative dentries
         * for a directory and may save an RPC for a later stat.
         */
        unsigned int             ns_ctime_age_limit;

        /**
         * Used to rate-limit ldlm_namespace_dump calls.
         * \see ldlm_namespace_dump. Increased by 10 seconds every time
         * it is called.
         */
        cfs_time_t               ns_next_dump;

        /** "policy" function that does actual lock conflict determination */
        ldlm_res_policy          ns_policy;

        /**
         * LVB operations for this namespace.
         * \see struct ldlm_valblock_ops
         */
        struct ldlm_valblock_ops *ns_lvbo;

        /**
         * Used by filter code to store pointer to OBD of the service.
         * Should be dropped in favor of \a ns_obd
         */
        void                    *ns_lvbp;

        /**
         * Wait queue used by __ldlm_namespace_free. Gets woken up every time
         * a resource is removed.
         */
        wait_queue_head_t        ns_waitq;
        /** LDLM pool structure for this namespace */
        struct ldlm_pool         ns_pool;
        /** Definition of how eagerly unused locks will be released from LRU */
        ldlm_appetite_t          ns_appetite;

        /**
         * If more than \a ns_contended_locks are found, the resource is
         * considered to be contended. Lock enqueues might specify that no
         * contended locks should be granted
         */
        unsigned                 ns_contended_locks;

        /**
         * The resources in this namespace remember contended state during
         * \a ns_contention_time, in seconds.
         */
        unsigned                 ns_contention_time;

        /**
         * Limit size of contended extent locks, in bytes.
         * If an extent lock is requested for more than this many bytes and
         * caller instructs us not to grant contended locks, we would disregard
         * the request.
         */
        unsigned                 ns_max_nolock_size;

        /** Limit of parallel AST RPC count. */
        unsigned                 ns_max_parallel_ast;

        /** Callback to cancel locks before replaying them during recovery. */
        ldlm_cancel_for_recovery ns_cancel_for_recovery;

        /** LDLM lock stats */
        struct lprocfs_stats    *ns_stats;

        /**
         * Flag to indicate namespace is being freed. Used to determine if
         * recalculation of LDLM pool statistics should be skipped.
         */
        unsigned                 ns_stopping:1;
};
/**
 * Returns 1 if namespace \a ns is a client namespace.
 */
static inline int ns_is_client(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_CLIENT;
}

/**
 * Returns 1 if namespace \a ns is a server namespace.
 */
static inline int ns_is_server(struct ldlm_namespace *ns)
{
        LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
                                    LDLM_NAMESPACE_SERVER)));
        LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
                ns->ns_client == LDLM_NAMESPACE_SERVER);
        return ns->ns_client == LDLM_NAMESPACE_SERVER;
}
/**
 * Returns 1 if namespace \a ns supports early lock cancel (ELC).
 */
static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
}

/**
 * Returns 1 if this namespace supports lru_resize.
 */
static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
        return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}

static inline void ns_register_cancel(struct ldlm_namespace *ns,
                                      ldlm_cancel_for_recovery arg)
{
        ns->ns_cancel_for_recovery = arg;
}
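/*
 * Usage sketch (an illustrative addition; my_cancel_for_recovery is a
 * hypothetical callback): decide at replay time whether a lock should be
 * cancelled instead of replayed, e.g. when it has no active users.
 *
 *	static int my_cancel_for_recovery(struct ldlm_lock *lock)
 *	{
 *		return lock->l_readers == 0 && lock->l_writers == 0;
 *	}
 *	...
 *	ns_register_cancel(ns, my_cancel_for_recovery);
 */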
/** Type for blocking callback function of a lock. */
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *new, void *data,
                                      int flag);
/** Type for completion callback function of a lock. */
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
                                        void *data);
/** Type for glimpse callback function of a lock. */
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
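/*
 * Sketch (an illustrative addition) of a blocking callback: the same
 * handler is invoked once with LDLM_CB_BLOCKING when a conflicting request
 * arrives, and once with LDLM_CB_CANCELING when the lock is finally being
 * cancelled.
 *
 *	static int my_blocking_ast(struct ldlm_lock *lock,
 *				   struct ldlm_lock_desc *desc,
 *				   void *data, int flag)
 *	{
 *		if (flag == LDLM_CB_BLOCKING) {
 *			// flush cached state, then cancel the lock
 *		} else {	// LDLM_CB_CANCELING
 *			// final cleanup; may happen recursively
 *		}
 *		return 0;
 *	}
 */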
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
        struct ldlm_lock        *gl_lock;  /* lock to glimpse */
        cfs_list_t               gl_list;  /* linkage to other gl work structs */
        __u32                    gl_flags; /* see LDLM_GL_WORK_* below */
        union ldlm_gl_desc      *gl_desc;  /* glimpse descriptor to be packed in
                                            * glimpse callback request */
};

/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
#define LDLM_GL_WORK_NOFREE 0x1
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
        struct interval_node     li_node;  /* node for tree management */
        cfs_list_t               li_group; /* the locks which have the same
                                            * policy - group of the policy */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
/**
 * Interval tree for extent locks.
 * The interval tree must be accessed under the resource lock.
 * Interval trees are used for granted extent locks to speed up conflicts
 * lookup. See ldlm/interval_tree.c for more details.
 */
struct ldlm_interval_tree {
        /** Tree size. */
        int                      lit_size;
        ldlm_mode_t              lit_mode;  /* lock mode */
        struct interval_node    *lit_root;  /* actual ldlm_interval */
};
/** Whether to track references to exports by LDLM locks. */
#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)

/** Cancel flags. */
typedef enum {
        LCF_ASYNC  = 0x1, /* Cancel locks asynchronously. */
        LCF_LOCAL  = 0x2, /* Cancel locks locally, not notifying server */
        LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
                           * in the same RPC */
} ldlm_cancel_flags_t;
struct ldlm_flock {
        __u64 start;
        __u64 end;
        __u64 owner;
        __u64 blocking_owner;
        struct obd_export *blocking_export;
        /* Protected by the hash lock */
        __u32 blocking_refs;
        __u32 pid;
};

typedef union {
        struct ldlm_extent l_extent;
        struct ldlm_flock l_flock;
        struct ldlm_inodebits l_inodebits;
} ldlm_policy_data_t;
void ldlm_convert_policy_to_wire(ldlm_type_t type,
                                 const ldlm_policy_data_t *lpolicy,
                                 ldlm_wire_policy_data_t *wpolicy);
void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
                                  const ldlm_wire_policy_data_t *wpolicy,
                                  ldlm_policy_data_t *lpolicy);
/**
 * LDLM lock structure
 *
 * Represents a single LDLM lock and its state in memory. Each lock is
 * associated with a single ldlm_resource, the object which is being
 * locked. There may be multiple ldlm_locks on a single resource,
 * depending on the lock type and whether the locks are conflicting or
 * not.
 */
struct ldlm_lock {
        /**
         * Local lock handle.
         * When remote side wants to tell us about a lock, they address
         * it by this opaque handle. The handle does not hold a
         * reference on the ldlm_lock, so it can be safely passed to
         * other threads or nodes. When the lock needs to be accessed
         * from the handle, it is looked up again in the lock table, and
         * may no longer exist.
         *
         * Must be first in the structure.
         */
        struct portals_handle    l_handle;
        /**
         * Lock reference count.
         * This is how many users have pointers to actual structure, so that
         * we do not accidentally free lock structure that is in use.
         */
        cfs_atomic_t             l_refc;
        /**
         * Internal spinlock protects l_resource. We should hold this lock
         * first before taking res_lock.
         */
        spinlock_t               l_lock;
        /**
         * Pointer to actual resource this lock is in.
         * ldlm_lock_change_resource() can change this.
         */
        struct ldlm_resource    *l_resource;
        /**
         * List item for client side LRU list.
         * Protected by ns_lock in struct ldlm_namespace.
         */
        cfs_list_t               l_lru;
        /**
         * Linkage to resource's lock queues according to current lock state.
         * (could be granted, waiting or converting)
         * Protected by lr_lock in struct ldlm_resource.
         */
        cfs_list_t               l_res_link;
        /**
         * Tree node for ldlm_extent.
         */
        struct ldlm_interval    *l_tree_node;
        /**
         * Per export hash of locks.
         * Protected by per-bucket exp->exp_lock_hash locks.
         */
        cfs_hlist_node_t         l_exp_hash;
        /**
         * Per export hash of flock locks.
         * Protected by per-bucket exp->exp_flock_hash locks.
         */
        cfs_hlist_node_t         l_exp_flock_hash;
        /**
         * Requested mode.
         * Protected by lr_lock.
         */
        ldlm_mode_t              l_req_mode;
        /**
         * Granted mode, also protected by lr_lock.
         */
        ldlm_mode_t              l_granted_mode;
        /** Lock completion handler pointer. Called when lock is granted. */
        ldlm_completion_callback l_completion_ast;
        /**
         * Lock blocking AST handler pointer.
         * It plays two roles:
         * - as a notification of an attempt to queue a conflicting lock (once)
         * - as a notification when the lock is being cancelled.
         *
         * As such it's typically called twice: once for the initial conflict
         * and then once more when the last user went away and the lock is
         * cancelled (could happen recursively).
         */
        ldlm_blocking_callback   l_blocking_ast;
        /**
         * Lock glimpse handler.
         * Glimpse handler is used to obtain LVB updates from a client by
         * the server.
         */
        ldlm_glimpse_callback    l_glimpse_ast;
        /**
         * Lock export.
         * This is a pointer to actual client export for locks that were
         * granted to clients. Used server-side.
         */
        struct obd_export       *l_export;
        /**
         * Lock connection export.
         * Pointer to server export on a client.
         */
        struct obd_export       *l_conn_export;
        /**
         * Remote lock handle.
         * If the lock is remote, this is the handle of the other side lock
         * (l_handle).
         */
        struct lustre_handle     l_remote_handle;
        /**
         * Representation of private data specific for a lock type.
         * Examples are: extent range for extent lock or bitmask for ibits locks
         */
        ldlm_policy_data_t       l_policy_data;
        /**
         * Lock state flags. Protected by lr_lock.
         * \see lustre_dlm_flags.h where the bits are defined.
         */
        __u64                    l_flags;
        /**
         * Lock r/w usage counters.
         * Protected by lr_lock.
         */
        __u32                    l_readers;
        __u32                    l_writers;
        /**
         * If the lock is granted, a process sleeps on this waitq to learn when
         * it's no longer in use. If the lock is not granted, a process sleeps
         * on this waitq to learn when it becomes granted.
         */
        wait_queue_head_t        l_waitq;
        /**
         * Time of last activity, in seconds. It will be updated if there is
         * any activity related to the lock, e.g. enqueue the lock or send
         * blocking AST.
         */
        cfs_time_t               l_last_activity;
        /**
         * Time last used by e.g. being matched by lock match.
         * Jiffies. Should be converted to time if needed.
         */
        cfs_time_t               l_last_used;

        /** Originally requested extent for the extent lock. */
        struct ldlm_extent       l_req_extent;

        /*
         * Client-side-only members.
         */

        enum lvb_type            l_lvb_type;
        /**
         * Temporary storage for a LVB received during an enqueue operation.
         */
        __u32                    l_lvb_len;
        void                    *l_lvb_data;

        /** Private storage for lock user. Opaque to LDLM. */
        void                    *l_ast_data;
        /*
         * Server-side-only members.
         */

        /**
         * Connection cookie for the client originating the operation.
         * Used by Commit on Share (COS) code. Currently only used for
         * inodebits locks on MDS.
         */
        __u64                    l_client_cookie;

        /**
         * List item for locks waiting for cancellation from clients.
         * The lists this could be linked into are:
         * waiting_locks_list (protected by waiting_locks_spinlock),
         * then if the lock timed out, it is moved to
         * expired_lock_thread.elt_expired_locks for further processing.
         * Protected by elt_lock.
         */
        cfs_list_t               l_pending_chain;

        /**
         * Set when lock is sent a blocking AST. Time in seconds when timeout
         * is reached and client holding this lock could be evicted.
         * This timeout could be further extended by e.g. certain IO activity.
         * \see ost_rw_prolong_locks
         */
        cfs_time_t               l_callback_timeout;

        /** Local PID of process which created this lock. */
        __u32                    l_pid;

        /**
         * Number of times blocking AST was sent for this lock.
         * This is for debugging. Valid values are 0 and 1, if there is an
         * attempt to send blocking AST more than once, an assertion would be
         * hit. \see ldlm_work_bl_ast_lock
         */
        int                      l_bl_ast_run;
        /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
        cfs_list_t               l_bl_ast;
        /** List item ldlm_add_ast_work_item() for case of completion ASTs. */
        cfs_list_t               l_cp_ast;
        /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
        cfs_list_t               l_rk_ast;

        /**
         * Pointer to a conflicting lock that caused blocking AST to be sent
         * for this lock
         */
        struct ldlm_lock        *l_blocking_lock;
        /**
         * Protected by lr_lock, linkages to "skip lists".
         * For more explanations of skip lists see ldlm/ldlm_inodebits.c
         */
        cfs_list_t               l_sl_mode;
        cfs_list_t               l_sl_policy;

        /** Reference tracking structure to debug leaked locks. */
        struct lu_ref            l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
        /* Debugging stuff for bug 20498, for tracking export references. */
        /** number of export references taken */
        int                      l_exp_refs_nr;
        /** link all locks referencing one export */
        cfs_list_t               l_exp_refs_link;
        /** referenced export object */
        struct obd_export       *l_exp_refs_target;
#endif
        /**
         * export blocking dlm lock list, protected by
         * l_export->exp_bl_list_lock.
         * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
         * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock.
         */
        cfs_list_t               l_exp_list;
};
/**
 * LDLM resource description.
 * Basically, resource is a representation for a single object.
 * Object has a name which is currently 4 64-bit integers. LDLM user is
 * responsible for creation of a mapping between objects it wants to be
 * protected and resource names.
 *
 * A resource can only hold locks of a single lock type, though there may be
 * multiple ldlm_locks on a single resource, depending on the lock type and
 * whether the locks are conflicting or not.
 */
struct ldlm_resource {
        struct ldlm_ns_bucket   *lr_ns_bucket;

        /**
         * List item for list in namespace hash.
         * protected by ns_lock
         */
        cfs_hlist_node_t         lr_hash;

        /** Spinlock to protect locks under this resource. */
        spinlock_t               lr_lock;

        /**
         * protected by lr_lock
         * @{ */
        /** List of locks in granted state */
        cfs_list_t               lr_granted;
        /** List of locks waiting to change their granted mode (converted) */
        cfs_list_t               lr_converting;
        /**
         * List of locks that could not be granted due to conflicts and
         * that are waiting for conflicts to go away */
        cfs_list_t               lr_waiting;
        /** @} */

        /* XXX No longer needed? Remove ASAP */
        ldlm_mode_t              lr_most_restr;

        /** Type of locks this resource can hold. Only one type per resource. */
        ldlm_type_t              lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */

        /** Resource name */
        struct ldlm_res_id       lr_name;
        /** Reference count for this resource */
        cfs_atomic_t             lr_refcount;

        /**
         * Interval trees (only for extent locks) for all modes of this resource
         */
        struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];

        /**
         * Server-side-only lock value block elements.
         * To serialize lvbo_init.
         */
        struct mutex             lr_lvb_mutex;
        /** protected by lr_lock */
        void                    *lr_lvb_data;

        /** When the resource was considered as contended. */
        cfs_time_t               lr_contention_time;
        /** List of references to this resource. For debugging. */
        struct lu_ref            lr_reference;

        struct inode            *lr_lvb_inode;
};
static inline bool ldlm_has_layout(struct ldlm_lock *lock)
{
        return lock->l_resource->lr_type == LDLM_IBITS &&
               lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT;
}
static inline char *
ldlm_ns_name(struct ldlm_namespace *ns)
{
        return ns->ns_rs_hash->hs_name;
}

static inline struct ldlm_namespace *
ldlm_res_to_ns(struct ldlm_resource *res)
{
        return res->lr_ns_bucket->nsb_namespace;
}

static inline struct ldlm_namespace *
ldlm_lock_to_ns(struct ldlm_lock *lock)
{
        return ldlm_res_to_ns(lock->l_resource);
}

static inline char *
ldlm_lock_to_ns_name(struct ldlm_lock *lock)
{
        return ldlm_ns_name(ldlm_lock_to_ns(lock));
}

static inline struct adaptive_timeout *
ldlm_lock_to_ns_at(struct ldlm_lock *lock)
{
        return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
}
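/*
 * Usage sketch (an illustrative addition): the helpers above chain
 * lock -> resource -> namespace bucket -> namespace, e.g.
 *
 *	struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 *
 *	CDEBUG(D_DLMTRACE, "lock belongs to namespace %s\n",
 *	       ldlm_lock_to_ns_name(lock));
 */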
static inline int ldlm_lvbo_init(struct ldlm_resource *res)
{
        struct ldlm_namespace *ns = ldlm_res_to_ns(res);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
                return ns->ns_lvbo->lvbo_init(res);

        return 0;
}

static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
                return ns->ns_lvbo->lvbo_size(lock);

        return 0;
}

static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
{
        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);

        if (ns->ns_lvbo != NULL) {
                LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
                return ns->ns_lvbo->lvbo_fill(lock, buf, len);
        }
        return 0;
}
struct ldlm_ast_work {
        struct ldlm_lock       *w_lock;
        int                     w_blocking;
        struct ldlm_lock_desc   w_desc;
        cfs_list_t              w_list;
        int                     w_flags;
        void                   *w_data;
        int                     w_datalen;
};
/**
 * Common ldlm_enqueue parameters
 */
struct ldlm_enqueue_info {
        __u32  ei_type;   /** Type of the lock being enqueued. */
        __u32  ei_mode;   /** Mode of the lock being enqueued. */
        void  *ei_cb_bl;  /** blocking lock callback */
        void  *ei_cb_cp;  /** lock completion callback */
        void  *ei_cb_gl;  /** lock glimpse callback */
        void  *ei_cbdata; /** Data to be passed into callbacks. */
};
extern struct obd_ops ldlm_obd_ops;

extern char *ldlm_lockname[];
extern char *ldlm_typename[];
extern char *ldlm_it2str(int it);
/**
 * Just a fancy CDEBUG call with log mask preset to D_DLMTRACE.
 * For the cases where we do not have an actual lock to print along
 * with an LDLM-related debugging message.
 */
#define LDLM_DEBUG_NOLOCK(format, a...)                 \
        CDEBUG(D_DLMTRACE, "### " format "\n", ##a)
/**
 * Support function for lock information printing into debug logs.
 * \see LDLM_DEBUG
 */
#ifdef LIBCFS_DEBUG
#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do {      \
        CFS_CHECK_STACK(msgdata, mask, cdls);                           \
                                                                        \
        if (((mask) & D_CANTMASK) != 0 ||                               \
            ((libcfs_debug & (mask)) != 0 &&                            \
             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))          \
                _ldlm_lock_debug(lock, msgdata, fmt, ##a);              \
} while (0)
void _ldlm_lock_debug(struct ldlm_lock *lock,
                      struct libcfs_debug_msg_data *data,
                      const char *fmt, ...)
        __attribute__ ((format (printf, 3, 4)));
/**
 * Rate-limited version of lock printing function.
 */
#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                         \
        static cfs_debug_limit_state_t _ldlm_cdls;                           \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls);              \
        ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt, ##a); \
} while (0)

#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
/** Non-rate-limited lock printing function for debugging purposes. */
#define LDLM_DEBUG(lock, fmt, a...) do {                                \
        if (likely(lock != NULL)) {                                     \
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);  \
                ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock,       \
                                "### " fmt, ##a);                       \
        } else {                                                        \
                LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a);            \
        }                                                               \
} while (0)
#else /* !LIBCFS_DEBUG */
# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
#endif
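/*
 * Usage sketch (an illustrative addition): LDLM_DEBUG() prints the lock
 * state followed by the formatted message; LDLM_ERROR() is the
 * rate-limited D_ERROR counterpart.
 *
 *	LDLM_DEBUG(lock, "enqueue returned rc = %d", rc);
 *	LDLM_ERROR(lock, "unexpected granted mode %d", lock->l_granted_mode);
 */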
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
                                      int first_enq, ldlm_error_t *err,
                                      cfs_list_t *work_list);
/**
 * Return values for lock iterators.
 * Also used when deciding on lock grants and cancellations.
 */
#define LDLM_ITER_CONTINUE 1 /* keep iterating */
#define LDLM_ITER_STOP     2 /* stop iterating */

typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
/** \defgroup ldlm_iterator Lock iterators
 *
 * LDLM provides a way to iterate through every lock on a resource or
 * namespace, or through every resource in a namespace.
 * @{ */
int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                          void *data);
void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
                            void *data);
int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                          ldlm_iterator_t iter, void *data);
/** @} ldlm_iterator */
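/*
 * Sketch (an illustrative addition; my_count_granted is a hypothetical
 * callback): count the granted locks on a resource. Returning
 * LDLM_ITER_CONTINUE keeps the walk going; LDLM_ITER_STOP aborts it.
 *
 *	static int my_count_granted(struct ldlm_lock *lock, void *closure)
 *	{
 *		if (lock->l_granted_mode == lock->l_req_mode)
 *			(*(int *)closure)++;
 *		return LDLM_ITER_CONTINUE;
 *	}
 *	...
 *	int granted = 0;
 *	ldlm_resource_foreach(res, my_count_granted, &granted);
 */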
int ldlm_replay_locks(struct obd_import *imp);

int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);

__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
struct ldlm_callback_suite {
        ldlm_completion_callback lcs_completion;
        ldlm_blocking_callback   lcs_blocking;
        ldlm_glimpse_callback    lcs_glimpse;
};
#ifdef HAVE_SERVER_SUPPORT
/** \defgroup ldlm_srv_ast Server AST handlers
 * These are AST handlers used by server code.
 * They only prepare RPCs to be sent to clients.
 * @{
 */
int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                             void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
/** @} ldlm_srv_ast */
/** \defgroup ldlm_handlers Server LDLM handlers
 * These are handler functions that should be called by "frontends" such as
 * MDT or OST to pass LDLM requests on to LDLM for handling.
 * @{
 */
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                        ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs);
int ldlm_handle_convert(struct ptlrpc_request *req);
int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req);
int ldlm_handle_cancel(struct ptlrpc_request *req);
int ldlm_request_cancel(struct ptlrpc_request *req,
                        const struct ldlm_request *dlm_req, int first);
/** @} ldlm_handlers */
void ldlm_revoke_export_locks(struct obd_export *exp);
#endif

int ldlm_del_waiting_lock(struct ldlm_lock *lock);
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
int ldlm_init_export(struct obd_export *exp);
void ldlm_destroy_export(struct obd_export *exp);
struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
#ifdef HAVE_SERVER_SUPPORT
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
#endif
void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
void ldlm_lock2handle(const struct ldlm_lock *lock,
                      struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
int ldlm_lock_set_data(struct lustre_handle *, void *);
/**
 * Obtain a lock reference by its handle.
 */
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
        return __ldlm_handle2lock(h, 0);
}
#define LDLM_LOCK_REF_DEL(lock) \
        lu_ref_del(&lock->l_reference, "handle", current)

static inline struct ldlm_lock *
ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
{
        struct ldlm_lock *lock;

        lock = __ldlm_handle2lock(h, flags);
        if (lock != NULL)
                LDLM_LOCK_REF_DEL(lock);
        return lock;
}
/**
 * Update Lock Value Block Operations (LVBO) on a resource taking into account
 * data from request \a r
 */
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                       struct ptlrpc_request *r, int increase)
{
        if (ldlm_res_to_ns(res)->ns_lvbo &&
            ldlm_res_to_ns(res)->ns_lvbo->lvbo_update)
                return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
                                                                 increase);
        return 0;
}
int ldlm_error2errno(ldlm_error_t error);
ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
                                            * confuses user-space. */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp);
#endif
/**
 * Release a temporary lock reference obtained by ldlm_handle2lock() or
 * __ldlm_handle2lock().
 */
#define LDLM_LOCK_PUT(lock)                     \
do {                                            \
        LDLM_LOCK_REF_DEL(lock);                \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

/**
 * Release a lock reference obtained by some other means (see
 * LDLM_LOCK_GET()).
 */
#define LDLM_LOCK_RELEASE(lock)                 \
do {                                            \
        /*LDLM_DEBUG((lock), "put");*/          \
        ldlm_lock_put(lock);                    \
} while (0)

#define LDLM_LOCK_GET(lock)                     \
({                                              \
        ldlm_lock_get(lock);                    \
        /*LDLM_DEBUG((lock), "get");*/          \
        lock;                                   \
})
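/*
 * Usage sketch (an illustrative addition): a handle lookup returns a
 * referenced lock (or NULL if it no longer exists); the reference must be
 * dropped with LDLM_LOCK_PUT() when done.
 *
 *	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
 *
 *	if (lock != NULL) {
 *		// inspect or modify the lock
 *		LDLM_LOCK_PUT(lock);
 *	}
 */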
#define ldlm_lock_list_put(head, member, count)                         \
({                                                                      \
        struct ldlm_lock *_lock, *_next;                                \
        int c = count;                                                  \
        cfs_list_for_each_entry_safe(_lock, _next, head, member) {      \
                if (c-- == 0)                                           \
                        break;                                          \
                cfs_list_del_init(&_lock->member);                      \
                LDLM_LOCK_RELEASE(_lock);                               \
        }                                                               \
        LASSERT(c <= 0);                                                \
})
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                            const struct ldlm_res_id *, ldlm_type_t type,
                            ldlm_policy_data_t *, ldlm_mode_t mode,
                            struct lustre_handle *, int unref);
ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
                                        __u64 *bits);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                        __u32 *flags);
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
                   ldlm_side_t client, ldlm_appetite_t apt,
                   ldlm_ns_type_t ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_free(struct ldlm_namespace *ns,
                         struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
#ifdef LPROCFS
void ldlm_proc_cleanup(void);
#else
static inline void ldlm_proc_cleanup(void) {}
#endif
/* resource.c - internal */
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                        struct ldlm_resource *parent,
                                        const struct ldlm_res_id *,
                                        ldlm_type_t type, int create);
struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
                            cfs_list_t *head,
                            struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                              const struct ldlm_res_id *);
#define LDLM_RESOURCE_ADDREF(res) do {                                  \
        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, current); \
} while (0)

#define LDLM_RESOURCE_DELREF(res) do {                                  \
        lu_ref_del(&(res)->lr_reference, __FUNCTION__, current);        \
} while (0)
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
/** \defgroup ldlm_local_ast Default AST handlers for local locks
 * These AST handlers are typically used for server-side local locks and are
 * also used by client-side lock handlers to perform minimum level base
 * processing.
 * @{ */
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                      void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
/** @} ldlm_local_ast */
/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
 * These are typically used by client and server (*_local versions)
 * to obtain and release locks.
 * @{ */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t const *policy, __u64 *flags,
                     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
                     struct lustre_handle *lockh, int async);
int ldlm_prep_enqueue_req(struct obd_export *exp,
                          struct ptlrpc_request *req,
                          cfs_list_t *cancels,
                          int count);
int ldlm_prep_elc_req(struct obd_export *exp,
                      struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      cfs_list_t *cancels, int count);

struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req,
                         const struct ldlm_callback_suite *cbs);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
                          struct lustre_handle *lockh, int rc);
int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                           const struct ldlm_res_id *res_id,
                           ldlm_type_t type, ldlm_policy_data_t *policy,
                           ldlm_mode_t mode, __u64 *flags,
                           ldlm_blocking_callback blocking,
                           ldlm_completion_callback completion,
                           ldlm_glimpse_callback glimpse,
                           void *data, __u32 lvb_len, enum lvb_type lvb_type,
                           const __u64 *client_cookie,
                           struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                    void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh,
                    ldlm_cancel_flags_t cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
                           ldlm_cancel_flags_t flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                    const struct ldlm_res_id *res_id,
                                    ldlm_policy_data_t *policy,
                                    ldlm_mode_t mode,
                                    ldlm_cancel_flags_t flags,
                                    void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
                        int count, ldlm_cancel_flags_t flags);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
                               cfs_list_t *cancels,
                               ldlm_policy_data_t *policy,
                               ldlm_mode_t mode, __u64 lock_flags,
                               ldlm_cancel_flags_t cancel_flags, void *opaque);
int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
                               ldlm_cancel_flags_t flags);
int ldlm_cli_cancel_list(cfs_list_t *head, int count,
                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
/** @} ldlm_cli_api */
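/*
 * Sketch (an illustrative addition) of the client enqueue flow: fill an
 * ldlm_enqueue_info with the desired type/mode and callbacks, then call
 * ldlm_cli_enqueue(). The values shown (LDLM_EXTENT, LCK_PR) and the
 * my_blocking_ast handler are hypothetical.
 *
 *	struct ldlm_enqueue_info einfo = {
 *		.ei_type  = LDLM_EXTENT,
 *		.ei_mode  = LCK_PR,
 *		.ei_cb_bl = my_blocking_ast,
 *		.ei_cb_cp = ldlm_completion_ast,
 *	};
 *	struct lustre_handle lockh;
 *	__u64 flags = 0;
 *	int rc;
 *
 *	rc = ldlm_cli_enqueue(exp, &req, &einfo, &res_id, &policy, &flags,
 *			      NULL, 0, LVB_T_NONE, &lockh, 0);
 *	if (rc == ELDLM_OK) {
 *		// ... use the locked range ...
 *		ldlm_lock_decref(&lockh, LCK_PR);
 *	}
 */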
/* This has to be here because recursive inclusion sucks. */
int intent_disposition(struct ldlm_reply *rep, int flag);
void intent_set_disposition(struct ldlm_reply *rep, int flag);
/* ioctls for trying requests */
#define IOC_LDLM_TYPE           'f'
#define IOC_LDLM_MIN_NR         40

#define IOC_LDLM_TEST           _IOWR('f', 40, long)
#define IOC_LDLM_DUMP           _IOWR('f', 41, long)
#define IOC_LDLM_REGRESS_START  _IOWR('f', 42, long)
#define IOC_LDLM_REGRESS_STOP   _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR         43
/**
 * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
 * than one lock_res is dead-lock safe.
 */
enum lock_res_type {
        LRT_NORMAL,
        LRT_NEW
};
/** Lock resource. */
static inline void lock_res(struct ldlm_resource *res)
{
        spin_lock(&res->lr_lock);
}

/** Lock resource with a way to instruct lockdep code about nestedness-safe. */
static inline void lock_res_nested(struct ldlm_resource *res,
                                   enum lock_res_type mode)
{
        spin_lock_nested(&res->lr_lock, mode);
}

/** Unlock resource. */
static inline void unlock_res(struct ldlm_resource *res)
{
        spin_unlock(&res->lr_lock);
}

/** Check if resource is already locked, assert if not. */
static inline void check_res_locked(struct ldlm_resource *res)
{
        LASSERT(spin_is_locked(&res->lr_lock));
}
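/*
 * Usage sketch (an illustrative addition): per-resource state (the
 * granted/converting/waiting queues, interval trees, LVB pointer) must
 * only be touched under lr_lock:
 *
 *	lock_res(res);
 *	// walk res->lr_granted, update res->lr_lvb_data, etc.
 *	unlock_res(res);
 *
 * When two resources must be held at once, lock_res_nested() with a
 * distinct lock_res_type tells lockdep the nesting is intentional.
 */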
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
void unlock_res_and_lock(struct ldlm_lock *lock);
/* ldlm_pool.c */
/** \defgroup ldlm_pools Various LDLM pool related functions
 * These are not used outside of ldlm.
 * @{
 */
int ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client);
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
/** @} ldlm_pools */

#endif
/** @} LDLM */