unsigned int orig_ids[MAXQUOTAS] = {0, 0};
struct llog_cookie *fcc = NULL;
struct filter_obd *filter;
- int rc, err, locked = 0, sync = 0;
+ int rc, err, sync = 0;
loff_t old_size = 0;
unsigned int ia_valid;
struct inode *inode;
if (fcc != NULL)
*fcc = oa->o_lcookie;
}
-
- if (ia_valid & ATTR_SIZE || ia_valid & (ATTR_UID | ATTR_GID)) {
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
DQUOT_INIT(inode);
+ /* Filter truncates and writes are serialized by
+ * i_alloc_sem; see the comment in
+ * filter_preprw_write. */
+ if (ia_valid & ATTR_SIZE)
+ down_write(&inode->i_alloc_sem);
LOCK_INODE_MUTEX(inode);
old_size = i_size_read(inode);
- locked = 1;
}
/* VBR: version recovery check */
rc = err;
}
- if (locked) {
- UNLOCK_INODE_MUTEX(inode);
- locked = 0;
- }
-
EXIT;
out_unlock:
- if (locked)
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
UNLOCK_INODE_MUTEX(inode);
-
+ if (ia_valid & ATTR_SIZE)
+ up_write(&inode->i_alloc_sem);
if (fcc)
OBD_FREE(fcc, sizeof(*fcc));
GOTO(cleanup, rc);
cleanup_phase = 4;
+ /* Filter truncate first locks i_mutex, then the partially
+ * truncated page; filter write code first locks pages, then
+ * takes i_mutex. To avoid a deadlock in case of concurrent
+ * punch/write requests from one client, filter writes and
+ * filter truncates are serialized by i_alloc_sem, allowing
+ * multiple writes or a single truncate. */
+ down_read(&dentry->d_inode->i_alloc_sem);
+
do_gettimeofday(&start);
for (i = 0, lnb = res; i < *pages; i++, lnb++) {
}
}
filter_grant_commit(exp, *pages, res);
+ up_read(&dentry->d_inode->i_alloc_sem);
}
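
For reference (not part of the patch), a minimal userspace sketch of the "multiple writes or single truncate" scheme the comment above describes: a pthread_rwlock_t stands in for i_alloc_sem and a pthread_mutex_t for i_mutex; because truncate holds the rwlock exclusively, the opposite lock orderings (pages then i_mutex for writes, i_mutex then pages for truncate) can never run concurrently. All names here are illustrative, not taken from the Lustre source.

    /* sketch.c: build with `cc -pthread sketch.c` */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t alloc_sem   = PTHREAD_RWLOCK_INITIALIZER; /* ~ i_alloc_sem */
    static pthread_mutex_t  inode_mutex = PTHREAD_MUTEX_INITIALIZER;  /* ~ i_mutex */

    static void *writer(void *arg)
    {
            /* write path: shared lock, lock "pages", then i_mutex */
            pthread_rwlock_rdlock(&alloc_sem);
            /* ... lock and fill pages here ... */
            pthread_mutex_lock(&inode_mutex);
            printf("write %ld commits under i_mutex\n", (long)arg);
            pthread_mutex_unlock(&inode_mutex);
            pthread_rwlock_unlock(&alloc_sem);
            return NULL;
    }

    static void *truncater(void *arg)
    {
            (void)arg;
            /* punch path: exclusive lock, then i_mutex, then "pages" */
            pthread_rwlock_wrlock(&alloc_sem);
            pthread_mutex_lock(&inode_mutex);
            printf("truncate runs alone\n");
            /* ... truncate the partially covered page here ... */
            pthread_mutex_unlock(&inode_mutex);
            pthread_rwlock_unlock(&alloc_sem);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[3];
            pthread_create(&t[0], NULL, writer, (void *)1L);
            pthread_create(&t[1], NULL, writer, (void *)2L);
            pthread_create(&t[2], NULL, truncater, NULL);
            for (int i = 0; i < 3; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
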
case 3:
filter_iobuf_put(&obd->u.filter, iobuf, oti);
i_size_read(inode) > fo->fo_readcache_max_filesize))
filter_invalidate_cache(obd, obj, nb, inode);
+ up_read(&inode->i_alloc_sem);
+
RETURN(rc);
}