#define OBD_LDLM_DEVICENAME "ldlm"
#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
+#define LDLM_DEFAULT_MAX_ALIVE 3900 /* 3900 seconds ~65 min */
#define LDLM_CTIME_AGE_LIMIT (10)
#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
* of ldlm_[res_]lvbo_[init,update,fill]() functions.
*/
struct ldlm_valblock_ops {
- int (*lvbo_init)(struct ldlm_resource *res);
- int (*lvbo_update)(struct ldlm_resource *res,
- struct ptlrpc_request *r,
- int increase);
- int (*lvbo_free)(struct ldlm_resource *res);
+ int (*lvbo_init)(struct ldlm_resource *res);
+ int (*lvbo_update)(struct ldlm_resource *res, struct ldlm_lock *lock,
+ struct ptlrpc_request *r, int increase);
+ int (*lvbo_free)(struct ldlm_resource *res);
/* Return size of lvb data appropriate RPC size can be reserved */
int (*lvbo_size)(struct ldlm_lock *lock);
/* Called to fill in lvb data to RPC buffer @buf */
* controlled by available memory on this client and on server.
*/
unsigned int ns_max_unused;
+
/** Maximum allowed age (last used time) for locks in the LRU */
- unsigned int ns_max_age;
+ ktime_t ns_max_age;
+
/**
* Server only: number of times we evicted clients due to lack of reply
* to ASTs.
struct ldlm_lock *ca_lock;
};
-/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
-#define LDLM_GL_WORK_NOFREE 0x1
+/** The ldlm_glimpse_work was slab allocated and must be freed accordingly. */
+#define LDLM_GL_WORK_SLAB_ALLOCATED 0x1
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
time64_t l_last_activity;
/**
- * Time last used by e.g. being matched by lock match.
- * Jiffies. Should be converted to time if needed.
+ * Time, in nanoseconds, last used by e.g. being matched by lock match.
*/
- cfs_time_t l_last_used;
+ ktime_t l_last_used;
/** Originally requested extent for the extent lock. */
struct ldlm_extent l_req_extent;
* The lists this could be linked into are:
* waiting_locks_list (protected by waiting_locks_spinlock),
* then if the lock timed out, it is moved to
- * expired_lock_thread.elt_expired_locks for further processing.
- * Protected by elt_lock.
+ * expired_lock_list for further processing.
*/
struct list_head l_pending_chain;
void *ei_cb_gl; /** lock glimpse callback */
void *ei_cbdata; /** Data to be passed into callbacks. */
void *ei_namespace; /** lock namespace **/
- unsigned int ei_enq_slave:1, /** whether enqueue slave stripes */
- ei_nonblock:1; /** non block enqueue */
+ unsigned int ei_enq_slave:1; /** whether enqueue slave stripes */
};
#define ei_res_id ei_cb_gl
# define LDLM_ERROR(lock, fmt, a...) ((void)0)
#endif
+/*
+ * Three intentions can be used for the policy functions in
+ * ldlm_processing_policy.
+ *
+ * LDLM_PROCESS_RESCAN:
+ *
+ * It's used when policy functions are called from ldlm_reprocess_queue() to
+ * reprocess the wait & convert list and try to grant locks. Blocking ASTs
+ * have already been sent in this situation; completion ASTs need to be sent
+ * for the locks being granted.
+ *
+ * LDLM_PROCESS_ENQUEUE:
+ *
+ * It's used when policy functions are called from ldlm_lock_enqueue() to
+ * process the wait & convert list for handling an enqueue request. Blocking
+ * ASTs have not been sent yet, so the list of conflicting locks is
+ * collected and ASTs are sent.
+ *
+ * LDLM_PROCESS_RECOVERY:
+ *
+ * It's used when policy functions are called from ldlm_reprocess_queue() to
+ * reprocess the wait & convert list when recovery is done. In case blocking
+ * ASTs were lost before recovery, it needs not only to grant locks if
+ * available, but also to send blocking ASTs to locks that don't have the
+ * AST-sent flag. Completion ASTs need to be sent for locks being granted.
+ */
+enum ldlm_process_intention {
+	LDLM_PROCESS_RESCAN = 0,	/* reprocess queue; BL ASTs already sent */
+	LDLM_PROCESS_ENQUEUE = 1,	/* new enqueue; collect conflicts, send ASTs */
+	LDLM_PROCESS_RECOVERY = 2,	/* reprocess after recovery completes */
+};
+
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, enum ldlm_error *err,
+ enum ldlm_process_intention intention,
+ enum ldlm_error *err,
struct list_head *work_list);
/**
* Update Lock Value Block Operations (LVBO) on a resource taking into account
* data from request \a r
*/
-static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
- struct ptlrpc_request *req, int increase)
+static inline int ldlm_lvbo_update(struct ldlm_resource *res,
+ struct ldlm_lock *lock,
+ struct ptlrpc_request *req, int increase)
{
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
int rc;
/* delayed lvb init may be required */
return rc;
}
- if (ldlm_res_to_ns(res)->ns_lvbo &&
- ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
- return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, req,
- increase);
- }
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_update)
+ return ns->ns_lvbo->lvbo_update(res, lock, req, increase);
+
return 0;
}
+static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
+ struct ptlrpc_request *req, int increase)
+{
+ return ldlm_lvbo_update(res, NULL, req, increase);
+}
+
int ldlm_error2errno(enum ldlm_error error);
enum ldlm_error ldlm_errno2error(int err_no); /* don't call it `errno': this
* confuses user-space. */
void ldlm_lock_downgrade(struct ldlm_lock *lock, enum ldlm_mode new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_reprocess_all(struct ldlm_resource *res);
-void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
+void ldlm_reprocess_recovery_done(struct ldlm_namespace *ns);
void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);