struct list_head l_children;
struct list_head l_childof;
struct list_head l_res_link; /*position in one of three res lists*/
- struct list_head l_inode_link; /* position in inode info list */
struct list_head l_export_chain; /* per-export chain of locks */
- struct list_head l_pending_chain; /* locks with callbacks pending */
+ struct list_head l_pending_chain; /* locks with callbacks pending*/
unsigned long l_callback_timeout;
ldlm_mode_t l_req_mode;
lock; \
})
+struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void *data, __u32 data_len);
int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
int ldlm_cli_cancel(struct lustre_handle *lockh);
+int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id);
/* mds/handler.c */
/* This has to be here because recurisve inclusion sucks. */
char *lli_symlink_name;
struct lustre_handle lli_intent_lock_handle;
struct semaphore lli_open_sem;
- struct list_head lli_osc_locks;
};
#define LL_SUPER_MAGIC 0x0BD00BD0
list_del(&lock->l_export_chain);
lock->l_export = NULL;
- lock->l_flags = LDLM_FL_DESTROYED;
+ lock->l_flags |= LDLM_FL_DESTROYED;
l_unlock(&lock->l_resource->lr_namespace->ns_lock);
LDLM_LOCK_PUT(lock);
lock->l_refc = 1;
INIT_LIST_HEAD(&lock->l_children);
INIT_LIST_HEAD(&lock->l_res_link);
- INIT_LIST_HEAD(&lock->l_inode_link);
INIT_LIST_HEAD(&lock->l_export_chain);
INIT_LIST_HEAD(&lock->l_pending_chain);
init_waitqueue_head(&lock->l_waitq);
list_for_each(tmp, queue) {
lock = list_entry(tmp, struct ldlm_lock, l_res_link);
- if (lock->l_flags & LDLM_FL_CBPENDING)
+ if (lock->l_flags & (LDLM_FL_CBPENDING | LDLM_FL_DESTROYED))
continue;
/* lock_convert() takes the resource lock, so we're sure that
if (lock->l_connh) {
LDLM_DEBUG(lock, "client-side cancel");
+ /* Set this flag to prevent others from getting new references*/
+ l_lock(&lock->l_resource->lr_namespace->ns_lock);
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+
req = ptlrpc_prep_req2(lock->l_connh, LDLM_CANCEL, 1, &size,
NULL);
if (!req)
LDLM_LOCK_PUT(lock);
return rc;
}
+
+/* Cancel all locks on a given resource that have 0 readers/writers.
+ *
+ * Walks the granted list of the resource named by @res_id in namespace @ns,
+ * collects every lock with no active readers or writers onto a private work
+ * list (taking an extra lock reference for each), then drops the namespace
+ * lock and cancels the collected locks one by one via ldlm_cli_cancel().
+ *
+ * Returns 0 on success; -ENOMEM if the resource lookup fails.
+ * NOTE(review): a failed ldlm_resource_get() with create==0 may just mean
+ * "no such resource"; reporting that as -ENOMEM looks misleading -- confirm. */
+int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, __u64 *res_id)
+{
+ struct ldlm_resource *res;
+ struct list_head *tmp, *next, list = LIST_HEAD_INIT(list);
+ struct ldlm_ast_work *w;
+ ENTRY;
+
+ /* Look up the resource without creating it (create flag == 0). */
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (res == NULL)
+ RETURN(-ENOMEM);
+
+ /* NOTE(review): the reference that ldlm_resource_get() presumably takes
+ * on 'res' is never released on any path below -- looks like a resource
+ * refcount leak; verify against ldlm_resource.c. */
+ l_lock(&ns->ns_lock);
+ list_for_each(tmp, &res->lr_granted) {
+ struct ldlm_lock *lock;
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ /* Skip locks that are still actively referenced. */
+ if (lock->l_readers || lock->l_writers)
+ continue;
+
+ /* Setting the CBPENDING flag is a little misleading, but
+ * prevents an important race; namely, once CBPENDING is set,
+ * the lock can accumulate no more readers/writers. Since
+ * readers and writers are already zero here, ldlm_lock_decref
+ * won't see this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING;
+
+ /* NOTE(review): LASSERT on allocation failure panics the
+ * system; graceful -ENOMEM handling may be preferable here. */
+ OBD_ALLOC(w, sizeof(*w));
+ LASSERT(w);
+
+ /* Extra reference keeps the lock alive after ns_lock is
+ * dropped, until the cancel pass below is done with it. */
+ w->w_lock = LDLM_LOCK_GET(lock);
+ list_add(&w->w_list, &list);
+ }
+ l_unlock(&ns->ns_lock);
+
+ /* Second pass, outside ns_lock: cancel each collected lock and
+ * release the work-list reference and bookkeeping structure. */
+ list_for_each_safe(tmp, next, &list) {
+ struct lustre_handle lockh;
+ int rc;
+ w = list_entry(tmp, struct ldlm_ast_work, w_list);
+
+ ldlm_lock2handle(w->w_lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh);
+ if (rc != ELDLM_OK)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+
+ LDLM_LOCK_PUT(w->w_lock);
+ list_del(&w->w_list);
+ OBD_FREE(w, sizeof(*w));
+ }
+
+ RETURN(0);
+}
struct obdo oa;
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
- //struct obd_device *obddev = class_conn2obd(&sbi->ll_osc_conn);
- struct list_head *tmp, *next;
ENTRY;
}
ptlrpc_free_req(fd->fd_req);
- // XXX Phil lov devices have no namespace
- //l_lock(&obddev->obd_namespace->ns_lock);
- list_for_each_safe(tmp, next, &lli->lli_osc_locks) {
- struct ldlm_lock *lock;
- struct lustre_handle lockh;
- lock = list_entry(tmp, struct ldlm_lock, l_inode_link);
-
- if (!list_empty(&lock->l_inode_link)) {
- list_del_init(&lock->l_inode_link);
- LDLM_LOCK_PUT(lock);
- }
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc < 0)
- CERROR("ldlm_cli_cancel: %d\n", rc);
- }
- //l_unlock(&obddev->obd_namespace->ns_lock);
+ //ldlm_cli_cancel_unused();
EXIT;
up(&inode->i_sem);
ldlm_lock2handle(lock, &lockh);
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
- if (!list_empty(&lock->l_inode_link)) {
- list_del_init(&lock->l_inode_link);
- LDLM_LOCK_PUT(lock);
- }
- l_unlock(&lock->l_resource->lr_namespace->ns_lock);
-
rc = ldlm_cli_cancel(&lockh);
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel failed: %d\n", rc);
ENTRY;
sema_init(&ii->lli_open_sem, 1);
- INIT_LIST_HEAD(&ii->lli_osc_locks);
/* core attributes first */
if (body->valid & OBD_MD_FLID)
inode->i_generation = body->generation;
if (body->valid & OBD_MD_FLRDEV)
inode->i_rdev = body->extra;
- //if (body->valid & OBD_MD_FLSIZE)
- // inode->i_size = body->size;
+ if (body->valid & OBD_MD_FLSIZE)
+ inode->i_size = body->size;
//if (body->valid & OBD_MD_FLEASIZE)
__u64 res_id[RES_NAME_SIZE] = { md->lmd_object_id };
struct obd_device *obddev = class_conn2obd(connh);
struct ldlm_extent *extent = extentp;
- struct ldlm_lock *lock;
- struct inode *inode = data;
- struct ll_inode_info *lli = ll_i2info(inode);
int rc;
__u32 mode2;
parent_lock, res_id, type, extent,
sizeof(extent), mode, flags, ldlm_completion_ast,
callback, data, datalen, lockh);
- if (rc)
- return rc;
-
- /* This code must change if we ever stop passing an inode in as data */
- /* This is ldlm and llite code. It makes me sad that it's in
- * osc_request.c --phil */
- lock = ldlm_handle2lock(lockh);
- if (lock) {
- /* Lock already has an extra ref from handle2lock */
- l_lock(&obddev->obd_namespace->ns_lock);
- list_add(&lock->l_inode_link, &lli->lli_osc_locks);
- l_unlock(&obddev->obd_namespace->ns_lock);
- }
-
return rc;
}