#define LDLM_FL_DESTROYED (1 << 6)
#define LDLM_FL_WAIT_NOREPROC (1 << 7)
+/* Reason codes passed as the new 'flag' argument of blocking-AST callbacks:
+ *   LDLM_CB_BLOCKING - a conflicting lock request is blocked on this lock;
+ *                      client-side callees cancel the lock in response.
+ *   LDLM_CB_DYING    - the lock itself is being freed; callees that only
+ *                      cancel do nothing for this case. */
+#define LDLM_CB_BLOCKING 1
+#define LDLM_CB_DYING 2
+
#define L2B(c) (1 << c)
/* compatibility matrix */
typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
struct ldlm_lock_desc *new, void *data,
- __u32 data_len);
+ __u32 data_len, int flag);
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags);
/* mds/handler.c */
/* This has to be here because recurisve inclusion sucks. */
int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, __u32 data_len);
+ void *data, __u32 data_len, int flag);
#endif /* __KERNEL__ */
extern struct inode_operations ll_file_inode_operations;
struct ldlm_lock;
int ll_lock_callback(struct ldlm_lock *, struct ldlm_lock_desc *, void *data,
- __u32 data_len);
+ __u32 data_len, int flag);
int ll_size_lock(struct inode *, struct lov_stripe_md *, __u64 start, int mode,
struct lustre_handle **);
int ll_size_unlock(struct inode *, struct lov_stripe_md *, int mode,
LDLM_LOCK_PUT(lock->l_parent);
if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
+ lock->l_blocking_ast(lock, NULL, lock->l_data,
+ lock->l_data_len, LDLM_CB_DYING);
+
spin_lock(&ns->ns_counter_lock);
ns->ns_locks--;
spin_unlock(&ns->ns_counter_lock);
/* FIXME: need a real 'desc' here */
+ /* NOTE(review): with the LDLM_CB_DYING invocation added above, the
+  * blocking AST now runs twice on destruction (once DYING, then once
+  * BLOCKING with a NULL desc).  Confirm the second call below is still
+  * intended rather than superseded by the DYING notification. */
lock->l_blocking_ast(lock, NULL, lock->l_data,
- lock->l_data_len);
+ lock->l_data_len, LDLM_CB_BLOCKING);
} else
l_unlock(&lock->l_resource->lr_namespace->ns_lock);
if (w->w_blocking)
rc = w->w_lock->l_blocking_ast
(w->w_lock, &w->w_desc, w->w_data,
- w->w_datalen);
+ w->w_datalen, LDLM_CB_BLOCKING);
else
rc = w->w_lock->l_completion_ast(w->w_lock, w->w_flags);
if (rc)
static int ldlm_server_blocking_ast(struct ldlm_lock *lock,
struct ldlm_lock_desc *desc,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
struct ldlm_request *body;
struct ptlrpc_request *req;
int rc = 0, size = sizeof(*body);
ENTRY;
+ if (flag == LDLM_CB_DYING) {
+ /* Don't need to do anything when the lock is freed. */
+ RETURN(0);
+ }
+
req = ptlrpc_prep_req(&lock->l_export->exp_ldlm_data.led_import,
LDLM_BL_CALLBACK, 1, &size, NULL);
if (!req)
"callback (%p)", lock->l_blocking_ast);
if (lock->l_blocking_ast != NULL) {
lock->l_blocking_ast(lock, &dlm_req->lock_desc,
- lock->l_data, lock->l_data_len);
+ lock->l_data, lock->l_data_len,
+ LDLM_CB_BLOCKING);
}
} else
LDLM_DEBUG(lock, "Lock still has references, will be"
*/
static int ldlm_test_blocking_ast(struct ldlm_lock *lock,
struct ldlm_lock_desc *new,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
int rc;
struct lustre_handle lockh;
ENTRY;
- LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc < 0) {
- CERROR("ldlm_cli_cancel: %d\n", rc);
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ LDLM_DEBUG(lock, "We're blocking. Cancelling lock");
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc < 0) {
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+ LBUG();
+ }
+ break;
+ case LDLM_CB_DYING:
+ LDLM_DEBUG(lock, "this lock is being freed");
+ break;
+ default:
LBUG();
}
/* blocking ast for basic tests. noop */
static int ldlm_blocking_ast(struct ldlm_lock *lock,
struct ldlm_lock_desc *new,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
ENTRY;
- CERROR("ldlm_blocking_ast: lock=%p, new=%p\n", lock, new);
+ CERROR("ldlm_blocking_ast: lock=%p, new=%p, flag=%d\n", lock, new,
+ flag);
RETURN(0);
}
ENTRY;
if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV)) {
-
+ LDLM_FL_BLOCK_CONV)) {
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock");
RETURN(0);
- }
+ }
- if (lock->l_granted_mode != lock->l_req_mode)
+ if (lock->l_granted_mode != lock->l_req_mode)
CERROR("completion ast called with non-granted lock\n");
/* add to list of granted locks */
struct ldlm_lock *lock;
int rc = 0;
ENTRY;
-
+
spin_lock(&ctl_lock);
if(list_empty(&lock_list)) {
CERROR("lock_list is empty\n");
spin_unlock(&ctl_lock);
- RETURN(0);
- }
-
+ RETURN(0);
+ }
+
/* delete from list */
- lock_info = list_entry(lock_list.next,
+ lock_info = list_entry(lock_list.next,
struct ldlm_test_lock, l_link);
list_del(lock_list.next);
num_locks--;
spin_unlock(&ctl_lock);
-
+
/* decrement and free the info */
lock = ldlm_handle2lock(&lock_info->l_lockh);
ldlm_lock_decref(&lock_info->l_lockh, lock->l_granted_mode);
}
static int ldlm_do_enqueue(struct ldlm_test_thread *thread)
-{
+{
struct lustre_handle lockh;
__u64 res_id[3] = {0};
__u32 lock_mode;
get_random_bytes(&random, sizeof(random));
ext.start = random % num_extents;
get_random_bytes(&random, sizeof(random));
- ext.end = random %
+ ext.end = random %
(num_extents - (int)ext.start) + ext.start;
- LDLM_DEBUG_NOLOCK("about to enqueue with resource %d, mode %d,"
- " extent %d -> %d",
- (int)res_id[0],
- lock_mode,
- (int)ext.start,
- (int)ext.end);
-
- rc = ldlm_match_or_enqueue(®ress_connh,
- NULL,
- thread->obddev->obd_namespace,
- NULL, res_id, LDLM_EXTENT, &ext,
- sizeof(ext), lock_mode, &flags,
- ldlm_test_completion_ast,
- ldlm_test_blocking_ast,
+ LDLM_DEBUG_NOLOCK("about to enqueue with resource "LPX64", mode %d,"
+ " extent "LPX64" -> "LPX64, res_id[0], lock_mode,
+ ext.start, ext.end);
+
+ rc = ldlm_match_or_enqueue(®ress_connh, NULL,
+ thread->obddev->obd_namespace,
+ NULL, res_id, LDLM_EXTENT, &ext,
+ sizeof(ext), lock_mode, &flags,
+ ldlm_test_completion_ast,
+ ldlm_test_blocking_ast,
NULL, 0, &lockh);
atomic_inc(&locks_requested);
}
static int ldlm_do_convert(void)
-{
+{
__u32 lock_mode;
unsigned char random;
int flags = 0, rc = 0;
}
/*
- * Adjust reference counts.
+ * Adjust reference counts.
* FIXME: This is technically a bit... wrong,
* since we don't know when/if the convert succeeded
*/
*/
dec_chance = chance_left * num_locks / max_locks;
chance_left -= dec_chance;
-
+
/* FIXME: conversions temporarily disabled
* until they are working correctly.
*/
atomic_read(&locks_matched));
spin_lock(&ctl_lock);
- LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
+ LDLM_DEBUG_NOLOCK("lock references currently held: %d, ",
num_locks);
spin_unlock(&ctl_lock);
- /*
+ /*
* We don't sleep after a lock being blocked, so let's
* make sure other things can run.
*/
RETURN(0);
}
-int ldlm_regression_start(struct obd_device *obddev,
- struct lustre_handle *connh,
- unsigned int threads, unsigned int max_locks_in,
- unsigned int num_resources_in,
+int ldlm_regression_start(struct obd_device *obddev,
+ struct lustre_handle *connh,
+ unsigned int threads, unsigned int max_locks_in,
+ unsigned int num_resources_in,
unsigned int num_extents_in)
{
int i, rc = 0;
regression_running = 1;
spin_unlock(&ctl_lock);
- regress_connh = *connh;
+ regress_connh = *connh;
max_locks = max_locks_in;
num_resources = num_resources_in;
num_extents = num_extents_in;
/* decrement all held locks */
while (!list_empty(&lock_list)) {
struct ldlm_lock *lock;
- struct ldlm_test_lock *lock_info =
- list_entry(lock_list.next, struct ldlm_test_lock,
+ struct ldlm_test_lock *lock_info =
+ list_entry(lock_list.next, struct ldlm_test_lock,
l_link);
list_del(lock_list.next);
num_locks--;
}
int ll_lock_callback(struct ldlm_lock *lock, struct ldlm_lock_desc *new,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
struct inode *inode = data;
struct lustre_handle lockh;
if (inode == NULL)
LBUG();
- down(&inode->i_sem);
- CDEBUG(D_INODE, "invalidating obdo/inode %ld\n", inode->i_ino);
- /* FIXME: do something better than throwing away everything */
- invalidate_inode_pages(inode);
- up(&inode->i_sem);
-
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel failed: %d\n", rc);
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc != ELDLM_OK)
+ CERROR("ldlm_cli_cancel failed: %d\n", rc);
+ break;
+ case LDLM_CB_DYING:
+ down(&inode->i_sem);
+ CDEBUG(D_INODE, "invalidating obdo/inode %ld\n", inode->i_ino);
+ /* FIXME: do something better than throwing away everything */
+ invalidate_inode_pages(inode);
+ up(&inode->i_sem);
+ break;
+ default:
+ LBUG();
+ }
+
RETURN(0);
}
return 0;
}
+/* revalidate hook wired into ll_file_inode_operations below.
+ * Returns 0 for a negative dentry (no inode); otherwise propagates the
+ * return code of ll_file_size() -- presumably a refresh of the cached
+ * file size for this inode's stripe metadata, but ll_file_size's
+ * definition is outside this hunk; confirm before relying on that. */
+static int ll_inode_revalidate(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ ENTRY;
+
+ if (!inode)
+ RETURN(0);
+
+ RETURN(ll_file_size(inode, ll_i2info(inode)->lli_smd));
+}
+
struct file_operations ll_file_operations = {
read: ll_file_read,
write: ll_file_write,
};
struct inode_operations ll_file_inode_operations = {
- truncate: ll_truncate,
- setattr: ll_setattr
+ truncate: ll_truncate,
+ setattr: ll_setattr,
+ revalidate: ll_inode_revalidate
};
}
static int mdc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
int rc;
struct inode *inode = data;
RETURN(-EINVAL);
}
- /* FIXME: do something better than throwing away everything */
- if (inode == NULL)
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc < 0) {
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+ LBUG();
+ }
+ break;
+ case LDLM_CB_DYING:
+ /* FIXME: do something better than throwing away everything */
+ if (inode == NULL)
+ LBUG();
+ if (S_ISDIR(inode->i_mode)) {
+ CDEBUG(D_INODE, "invalidating inode %ld\n",
+ inode->i_ino);
+ invalidate_inode_pages(inode);
+ }
+ break;
+ default:
LBUG();
- if (S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode %ld\n", inode->i_ino);
- invalidate_inode_pages(inode);
}
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc < 0) {
- CERROR("ldlm_cli_cancel: %d\n", rc);
- LBUG();
- }
RETURN(0);
}
}
int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
- void *data, __u32 data_len)
+ void *data, __u32 data_len, int flag)
{
int do_ast;
ENTRY;
+ if (flag == LDLM_CB_DYING) {
+ /* Don't need to do anything when the lock is freed. */
+ RETURN(0);
+ }
+
l_lock(&lock->l_resource->lr_namespace->ns_lock);
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
name = lustre_msg_buf(req->rq_reqmsg, offset + 1);
namelen = req->rq_reqmsg->buflens[offset + 1] - 1;
+#warning FIXME: if mds_name2locked_dentry decrefs this lock, we must not
+ memcpy(&child_lockh, &lockh, sizeof(child_lockh));
dchild = mds_name2locked_dentry(obd, de, NULL, name, namelen,
LCK_EX, &child_lockh, lock_mode);