Description: Kill unused ldlm_handle2lock_ns() function.
Details : Kill unused ldlm_handle2lock_ns() function.
-Severity : minor
-Bugzilla : 16450
-Description: Kill unused ldlm_handle2lock_ns() function.
-Details : Kill unused ldlm_handle2lock_ns() function.
-
Severity : normal
Bugzilla : 16450
Description: Add lu_ref support to ldlm_lock
attempts to addref a lock that might be being canceled
concurrently.
+Severity : normal
+Bugzilla : 16450
+Description: Add ldlm_weigh_callback().
+Details : Add new ->l_weigh_ast() call-back to ldlm_lock. It is called
+ by ldlm_cancel_shrink_policy() to estimate lock "value", instead of
+ hard-coded `number of pages' logic.
+
--------------------------------------------------------------------------------
2007-08-10 Cluster File Systems, Inc. <info@clusterfs.com>
/**
* Lower limit to number of pages in lock to keep it in cache.
*/
- unsigned int ns_shrink_thumb;
+ unsigned long ns_shrink_thumb;
/**
* Next debug dump, jiffies.
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
+typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
* Lock glimpse handler.
*/
ldlm_glimpse_callback l_glimpse_ast;
+ ldlm_weigh_callback l_weigh_ast;
/**
* Lock export.
void *ei_cb_bl; /* blocking lock callback */
void *ei_cb_cp; /* lock completion callback */
void *ei_cb_gl; /* lock glimpse callback */
+ void *ei_cb_wg; /* lock weigh callback */
void *ei_cbdata; /* Data to be passed into callbacks. */
short ei_async:1; /* async request */
};
return LDLM_POLICY_KEEP_LOCK;
if (lock->l_resource->lr_type == LDLM_EXTENT) {
+ if (lock->l_weigh_ast) {
+ /*
+ * For liblustre, l_weigh_ast should return 0 since it
+ * doesn't cache pages
+ */
+ page_nr = lock->l_weigh_ast(lock);
+ } else {
struct ldlm_extent *l_extent;
/*
* their extent.
*/
l_extent = &lock->l_policy_data.l_extent;
- page_nr = (l_extent->end - l_extent->start);
+ page_nr = l_extent->end - l_extent->start;
do_div(page_nr, CFS_PAGE_SIZE);
-
-#ifdef __KERNEL__
- /*
- * XXX: In fact this is evil hack, we can't access inode
- * here. For doing it right we need somehow to have number
- * of covered by lock. This should be fixed later when 10718
- * is landed.
- */
- if (lock->l_ast_data != NULL) {
- struct inode *inode = lock->l_ast_data;
- if (page_nr > inode->i_mapping->nrpages)
- page_nr = inode->i_mapping->nrpages;
}
-#endif
lock_cost = 1 + page_nr;
} else {
/*
&lli->lli_fid, LDLM_IBITS, &policy, LCK_CR, &lockh);
if (!rc) {
struct ldlm_enqueue_info einfo = {LDLM_IBITS, LCK_CR,
- llu_md_blocking_ast, ldlm_completion_ast, NULL, inode};
+ llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL,
+ inode};
llu_prep_md_op_data(&op_data, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY);
fid_ver(&lli->lli_fid),
LDLM_FLOCK} };
struct ldlm_enqueue_info einfo = { LDLM_FLOCK, 0, NULL,
- ldlm_flock_completion_ast, NULL, file_lock };
+ ldlm_flock_completion_ast, NULL, NULL, file_lock };
struct lustre_handle lockh = {0};
ldlm_policy_data_t flock;
struct lov_stripe_md *lsm;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CR,
- llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL };
+ llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
struct ptlrpc_request *req = NULL;
struct lustre_md md;
ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
if (!rc) {
struct ldlm_enqueue_info einfo = { LDLM_IBITS, mode,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, dir };
+ ll_md_blocking_ast, ldlm_completion_ast,
+ NULL, NULL, dir };
struct lookup_intent it = { .it_op = IT_READDIR };
struct ptlrpc_request *request;
struct md_op_data *op_data;
struct lookup_intent oit = {.it_op = IT_OPEN,
.it_flags = head_filp->f_flags|O_JOIN_FILE};
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CW,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL };
+ ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
struct lustre_handle lockh;
struct md_op_data *op_data;
if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
struct ldlm_enqueue_info einfo =
{ LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
- ldlm_completion_ast, NULL, NULL };
+ ldlm_completion_ast, NULL, NULL, NULL };
/* For case if upper layer did not alloc fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
NULL, lh);
} else {
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_EX,
- ldlm_blocking_ast, ldlm_completion_ast, NULL, NULL };
+ ldlm_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
int flags = 0;
/*
{
struct config_llog_data *cld = (struct config_llog_data *)data;
struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, data};
+ ldlm_completion_ast, NULL, NULL, data};
int rc;
ENTRY;