-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
+/* One unit of glimpse-AST work: a single lock to send a glimpse for.
+ * Work items are chained through gl_list and presumably consumed in bulk
+ * by ldlm_glimpse_locks() (see its prototype, which takes a gl_work_list)
+ * -- TODO confirm against the caller. */
+struct ldlm_glimpse_work {
+ struct ldlm_lock *gl_lock; /* lock to glimpse */
+ cfs_list_t gl_list; /* linkage to other gl work structs */
+ __u32 gl_flags;/* see LDLM_GL_WORK_* below */
+};
+
+/* the ldlm_glimpse_work is allocated on the stack and should not be freed */
+#define LDLM_GL_WORK_NOFREE 0x1
+
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
struct interval_node li_node; /* node for tree mgmt */
struct interval_node *lit_root; /* actually ldlm_interval */
};
-#define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
+#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
/* Cancel flag. */
typedef enum {
__u64 owner;
__u64 blocking_owner;
struct obd_export *blocking_export;
+ /* Protected by the hash lock */
+ __u32 blocking_refs;
__u32 pid;
};
/**
* Protected by lr_lock. Requested mode.
*/
+ /**
+ * Per-export hash of locks. Protected by the per-bucket
+ * exp->exp_flock_hash locks.
+ */
+ cfs_hlist_node_t l_exp_flock_hash;
+
ldlm_mode_t l_req_mode;
/**
* Granted mode, also protected by lr_lock.
* Protected by lock and resource locks.
*/
l_destroyed:1,
+ /*
+ * it's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+ *
+ * NB: compared with check_res_locked(), checking this bit is cheaper.
+ * Also, spin_is_locked() is deprecated for kernel code; one reason is
+ * that it only works on SMP, so users need to add extra macros like
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels.
+ */
+ l_res_locked:1,
+ /*
+ * it's set once we call ldlm_add_waiting_lock_res_locked()
+ * to start the lock-timeout timer and it will never be reset.
+ *
+ * Protected by lock_res_and_lock().
+ */
+ l_waited:1,
/**
* flag whether this is a server namespace lock.
*/
ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, "### " fmt , ##a);\
} while (0)
#else /* !LIBCFS_DEBUG */
+# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
-# define ldlm_lock_debuf(cdls, level, lock, file, func, line, fmt, a...) \
- ((void)0)
#endif
#define LDLM_DEBUG_NOLOCK(format, a...) \
void *data, int flag);
int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
+int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
ldlm_blocking_callback, ldlm_glimpse_callback);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
-void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
cfs_spin_lock_nested(&res->lr_lock, mode);
}
-
static inline void unlock_res(struct ldlm_resource *res)
{
cfs_spin_unlock(&res->lr_lock);