LL_IT2STR(it));
if (de->d_inode == NULL) {
+ __u64 ibits;
+
/* We can only use negative dentries if this is stat or lookup,
for opens and stuff we do need to query server. */
/* If there is IT_CREAT in intent op set, then we must throw
if (de->d_flags & DCACHE_LUSTRE_INVALID)
RETURN(0);
- rc = ll_have_md_lock(parent, MDS_INODELOCK_UPDATE, LCK_MINMODE);
+ ibits = MDS_INODELOCK_UPDATE;
+ rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
GOTO(out_sa, rc);
}
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_client_handle **och_p;
__u64 *och_usecount;
+ __u64 ibits;
/*
* We used to check for MDS_INODELOCK_OPEN here, but in fact
och_usecount = &lli->lli_open_fd_read_count;
}
/* Check for the proper lock. */
- if (!ll_have_md_lock(inode, MDS_INODELOCK_LOOKUP, LCK_MINMODE))
+ ibits = MDS_INODELOCK_LOOKUP;
+ if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
cfs_down(&lli->lli_och_sem);
if (*och_p) { /* Everything is open already, do nothing */
RETURN(-ENOSYS);
}
-int ll_have_md_lock(struct inode *inode, __u64 bits, ldlm_mode_t l_req_mode)
+/**
+ * test if some locks matching bits and l_req_mode are acquired
+ * - bits can be in different locks
+ * - if found, clear the matched lock bits in *bits
+ * - the bits not found are left set in *bits
+ * \param inode [IN]
+ * \param bits [IN,OUT] searched lock bits
+ * \param l_req_mode [IN] searched lock mode
+ * \retval boolean, true iff all bits are found
+ */
+int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
{
struct lustre_handle lockh;
- ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_policy_data_t policy;
ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
int flags;
+ int i;
ENTRY;
if (!inode)
ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, &policy,
- mode, &lockh)) {
- RETURN(1);
+ for (i = 0; i < MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
+ policy.l_inodebits.bits = *bits & (1 << i);
+ if (policy.l_inodebits.bits == 0)
+ continue;
+
+ if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
+ &policy, mode, &lockh)) {
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(&lockh);
+ if (lock) {
+ *bits &=
+ ~(lock->l_policy_data.l_inodebits.bits);
+ LDLM_LOCK_PUT(lock);
+ } else {
+ *bits &= ~policy.l_inodebits.bits;
+ }
+ }
}
- RETURN(0);
+ RETURN(*bits == 0);
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
}
ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, ibits, LCK_MINMODE)) {
+ } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
obd_valid valid = OBD_MD_FLGETATTR;
struct md_op_data *op_data;
cfs_spin_lock(&capa_lock);
cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+ __u64 ibits;
+
LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
if (!capa_is_to_expire(ocapa)) {
* dir, or its inode is opened, or client holds LOOKUP
* lock.
*/
+ /* ibits may be changed by ll_have_md_lock() so we have
+ * to set it each time */
+ ibits = MDS_INODELOCK_LOOKUP;
if (capa_for_mds(&ocapa->c_capa) &&
!S_ISDIR(ocapa->u.cli.inode->i_mode) &&
obd_capa_open_count(ocapa) == 0 &&
!ll_have_md_lock(ocapa->u.cli.inode,
- MDS_INODELOCK_LOOKUP,
- LCK_MINMODE)) {
+ &ibits, LCK_MINMODE)) {
DEBUG_CAPA(D_SEC, &ocapa->c_capa,
"skip renewal for");
sort_add_capa(ocapa, &ll_idle_capas);
c_list) {
if (!capa_is_expired(ocapa)) {
if (!next)
- update_capa_timer(ocapa, ocapa->c_expiry);
+ update_capa_timer(ocapa,
+ ocapa->c_expiry);
break;
}
if (rc == -EIO && !capa_is_expired(ocapa)) {
delay_capa_renew(ocapa, 120);
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
- "renewal failed: -EIO, retry in 2 mins");
+ "renewal failed: -EIO, "
+ "retry in 2 mins");
ll_capa_renewal_retries++;
GOTO(retry, rc);
} else {
ssize_t ll_file_lockless_io(struct file *, char *, size_t, loff_t *, int);
void ll_clear_file_contended(struct inode*);
int ll_sync_page_range(struct inode *, struct address_space *, loff_t, size_t);
-int ll_readahead(const struct lu_env *env, struct cl_io *io, struct ll_readahead_state *ras,
- struct address_space *mapping, struct cl_page_list *queue, int flags);
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct ll_readahead_state *ras, struct address_space *mapping,
+ struct cl_page_list *queue, int flags);
/* llite/file.c */
extern struct file_operations ll_file_operations;
extern struct inode_operations ll_file_inode_operations;
extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
__u64);
-extern int ll_have_md_lock(struct inode *inode, __u64 bits, ldlm_mode_t l_req_mode);
+extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
+ ldlm_mode_t l_req_mode);
extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
struct lustre_handle *lockh);
-int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *, __u64 bits);
+int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
+ __u64 bits);
int ll_revalidate_nd(struct dentry *dentry, struct nameidata *nd);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
LASSERT(lock->l_flags & LDLM_FL_CANCELING);
/* For OPEN locks we differentiate between lock modes - CR, CW. PR - bug 22891 */
- if ((bits & MDS_INODELOCK_LOOKUP) &&
- ll_have_md_lock(inode, MDS_INODELOCK_LOOKUP, LCK_MINMODE))
- bits &= ~MDS_INODELOCK_LOOKUP;
- if ((bits & MDS_INODELOCK_UPDATE) &&
- ll_have_md_lock(inode, MDS_INODELOCK_UPDATE, LCK_MINMODE))
- bits &= ~MDS_INODELOCK_UPDATE;
- if ((bits & MDS_INODELOCK_OPEN) &&
- ll_have_md_lock(inode, MDS_INODELOCK_OPEN, mode))
- bits &= ~MDS_INODELOCK_OPEN;
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE))
+ ll_have_md_lock(inode, &bits, LCK_MINMODE);
+
+ if (bits & MDS_INODELOCK_OPEN)
+ ll_have_md_lock(inode, &bits, mode);
fid = ll_inode2fid(inode);
if (lock->l_resource->lr_name.name[0] != fid_seq(fid) ||
unlock_dentry(*de);
}
} else {
+ __u64 ibits;
+
ll_dops_init(*de, 1, 1);
/* Check that parent has UPDATE lock. If there is none, we
cannot afford to hash this dentry (done by ll_d_add) as it
might get picked up later when UPDATE lock will appear */
- if (ll_have_md_lock(parent, MDS_INODELOCK_UPDATE, LCK_MINMODE)) {
+ ibits = MDS_INODELOCK_UPDATE;
+ if (ll_have_md_lock(parent, &ibits, LCK_MINMODE)) {
spin_lock(&dcache_lock);
ll_d_add(*de, NULL);
spin_unlock(&dcache_lock);