* inode ourselves so we can call obdo_from_inode() always. */
if (ia_valid & (lsm ? ~(ATTR_SIZE | ATTR_FROM_OPEN | ATTR_RAW) : ~0)) {
struct lustre_md md;
- int save_valid;
ll_prepare_mdc_op_data(&op_data, inode, NULL, NULL, 0, 0);
rc = mdc_setattr(sbi->ll_mdc_exp, &op_data,
*
* NB: ATTR_SIZE will only be set at this point if the size
* resides on the MDS, ie, this file has no objects. */
- save_valid = attr->ia_valid;
attr->ia_valid &= ~ATTR_SIZE;
inode_setattr(inode, attr);
- attr->ia_valid = save_valid;
ll_update_inode(inode, md.body, md.lsm);
ptlrpc_req_finished(request);
ldlm_policy_data_t policy;
ldlm_mode_t mode;
struct page *page = NULL;
+ struct ll_inode_info *lli = ll_i2info(inode);
__u64 kms, old_mtime;
unsigned long pgoff, size, rand_read, seq_read;
int rc = 0;
ENTRY;
- if (ll_i2info(inode)->lli_smd == NULL) {
+ if (lli->lli_smd == NULL) {
CERROR("No lsm on fault?\n");
RETURN(NULL);
}
policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
- vma, inode->i_ino, policy.l_extent.start,
- policy.l_extent.end);
+ vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
mode = mode_from_vma(vma);
old_mtime = LTIME_S(inode->i_mtime);
- rc = ll_extent_lock(fd, inode, ll_i2info(inode)->lli_smd, mode, &policy,
+ rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
&lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
if (rc != 0)
RETURN(NULL);
/* XXX change inode size without i_sem hold! there is a race condition
* with truncate path. (see ll_extent_lock) */
- kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
+ down(&lli->lli_size_sem);
+ kms = lov_merge_size(lli->lli_smd, 1);
pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (pgoff >= size)
+ if (pgoff >= size) {
+ up(&lli->lli_size_sem);
ll_glimpse_size(inode);
- else
+ } else {
inode->i_size = kms;
+ up(&lli->lli_size_sem);
+ }
/* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
* the kernel will not read other pages not covered by ldlm in
*
* 1) Further extending writes may have landed in the page cache
* since a partial write first queued this page requiring us
- * to write more from the page cache.
+ * to write more from the page cache.  (No further races are possible
+ * here, since the page is locked by the time this is called.)
* 2) We might have raced with truncate and want to avoid performing
* write RPCs that are just going to be thrown away by the
* truncate's punch on the storage targets.
*/
static int ll_ap_refresh_count(void *data, int cmd)
{
+ struct ll_inode_info *lli;
struct ll_async_page *llap;
struct lov_stripe_md *lsm;
struct page *page;
- __u64 kms;
+ __u64 kms, retval;
ENTRY;
/* readpage queues with _COUNT_STABLE, shouldn't get here. */
RETURN(PTR_ERR(llap));
page = llap->llap_page;
- lsm = ll_i2info(page->mapping->host)->lli_smd;
+ lli = ll_i2info(page->mapping->host);
+ lsm = lli->lli_smd;
+
+ down(&lli->lli_size_sem);
kms = lov_merge_size(lsm, 1);
+ up(&lli->lli_size_sem);
/* catch race with truncate */
if (((__u64)page->index << PAGE_SHIFT) >= kms)
EXIT;
}
-static int ll_page_matches(struct page *page)
+static int ll_page_matches(struct page *page, int readahead)
{
struct lustre_handle match_lockh = {0};
struct inode *inode = page->mapping->host;
page_extent.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
page_extent.l_extent.end =
page_extent.l_extent.start + PAGE_CACHE_SIZE - 1;
- flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
+ flags = LDLM_FL_TEST_LOCK;
+ if (!readahead)
+ flags |= LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
matches = obd_match(ll_i2sbi(inode)->ll_osc_exp,
ll_i2info(inode)->lli_smd, LDLM_EXTENT,
&page_extent, LCK_PR | LCK_PW, &flags, inode,
goto next_page;
/* bail when we hit the end of the lock. */
- if ((rc = ll_page_matches(page)) <= 0) {
+ if ((rc = ll_page_matches(page, 1)) <= 0) {
LL_CDEBUG_PAGE(D_READA | D_PAGE, page,
"lock match failed: rc %d\n", rc);
ll_ra_stats_inc(mapping, RA_STAT_FAILED_MATCH);
GOTO(out_oig, rc = 0);
}
- rc = ll_page_matches(page);
+ rc = ll_page_matches(page, 0);
if (rc < 0) {
LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n", rc);
GOTO(out, rc);