* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
void ll_truncate(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- loff_t new_size;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu=%#Lx\n",inode->i_ino,
- inode->i_generation, inode, i_size_read(inode),
- i_size_read(inode));
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
- if (lli->lli_size_sem_owner != cfs_current()) {
- EXIT;
- return;
- }
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) to %Lu\n", inode->i_ino,
+ inode->i_generation, inode, i_size_read(inode));
- if (!lli->lli_smd) {
- CDEBUG(D_INODE, "truncate on inode %lu with no objects\n",
- inode->i_ino);
- GOTO(out_unlock, 0);
- }
- LASSERT_SEM_LOCKED(&lli->lli_size_sem);
-
- if (unlikely((ll_i2sbi(inode)->ll_flags & LL_SBI_CHECKSUM) &&
- (i_size_read(inode) & ~CFS_PAGE_MASK))) {
- /* If the truncate leaves a partial page, update its checksum */
- struct page *page = find_get_page(inode->i_mapping,
- i_size_read(inode) >>
- CFS_PAGE_SHIFT);
- if (page != NULL) {
-#if 0 /* XXX */
- struct ll_async_page *llap = llap_cast_private(page);
- if (llap != NULL) {
- char *kaddr = kmap_atomic(page, KM_USER0);
- llap->llap_checksum =
- init_checksum(OSC_DEFAULT_CKSUM);
- llap->llap_checksum =
- compute_checksum(llap->llap_checksum,
- kaddr, CFS_PAGE_SIZE,
- OSC_DEFAULT_CKSUM);
- kunmap_atomic(kaddr, KM_USER0);
- }
- page_cache_release(page);
-#endif
- }
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_TRUNC, 1);
+ if (lli->lli_size_sem_owner == cfs_current()) {
+ LASSERT_SEM_LOCKED(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
}
- new_size = i_size_read(inode);
- ll_inode_size_unlock(inode, 0);
-
EXIT;
return;
-
- out_unlock:
- ll_inode_size_unlock(inode, 0);
} /* ll_truncate */
/**
lcc = ERR_PTR(result);
}
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %i %p %p\n",
+ CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
env, io);
return lcc;
* otherwise it will form small read RPC(< 1M), which hurt server
* performance a lot.
*/
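+ /*
+ * Editorial illustration (assuming 4 KiB pages, so
+ * PTLRPC_MAX_BRW_PAGES is 256): if only 100 read-ahead pages remain
+ * in the per-superblock budget but the caller asks for 256, the
+ * check below grants nothing at all rather than let a sub-1M read
+ * RPC go out.
+ */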
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
GOTO(out, ret = 0);
- if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- atomic_sub(ret, &ra->ra_cur_pages);
+ if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ cfs_atomic_sub(ret, &ra->ra_cur_pages);
ret = 0;
}
out:
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
- atomic_sub(len, &ra->ra_cur_pages);
+ cfs_atomic_sub(len, &ra->ra_cur_pages);
}
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
rar->lrr_reader = current;
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ cfs_spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ cfs_spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
struct ll_ra_read *scan;
- list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
bead = ll_ra_read_get_locked(ras);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
return bead;
}
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue,
- int index, struct address_space *mapping)
+ pgoff_t index, struct address_space *mapping)
{
struct page *vmpage;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
unsigned long off, unsigned long length)
{
- unsigned long start = off > st_off ? off - st_off : 0;
- unsigned long end = off + length > st_off ? off + length - st_off : 0;
+ __u64 start = off > st_off ? off - st_off : 0;
+ __u64 end = off + length > st_off ? off + length - st_off : 0;
unsigned long start_left = 0;
unsigned long end_left = 0;
unsigned long pg_count;
if (end_left > st_pgs)
end_left = st_pgs;
- CDEBUG(D_READA, "start %lu, end %lu start_left %lu end_left %lu \n",
+ CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu \n",
start, end, start_left, end_left);
if (start == end)
* For stride I/O mode, just check whether the idx is inside
* the ria_pages. */
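+ /*
+ * Editorial example with hypothetical values: for ria_stoff == 100,
+ * ria_length == 16 and ria_pages == 4, idx 117 gives
+ * (117 - 100) % 16 == 1 < 4 and is inside the window, while idx 99
+ * is now rejected by the "idx >= ria->ria_stoff" check instead of
+ * relying on the unsigned subtraction wrapping around.
+ */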
return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
- (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages;
+ (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
+ ria->ria_length < ria->ria_pages);
}
static int ll_read_ahead_pages(const struct lu_env *env,
/* FIXME: This assertion only is valid when it is for
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented */
- LASSERTF(page_idx > ria->ria_stoff, "since %lu in the"
- " gap of ra window,it should bigger than stride"
- " offset %lu \n", page_idx, ria->ria_stoff);
-
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu "
+ "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
offset = page_idx - ria->ria_stoff;
offset = offset % (ria->ria_length);
if (offset > ria->ria_pages) {
RETURN(0);
}
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (vio->cui_ra_window_set)
bead = &vio->cui_bead;
else
end = ras->ras_window_start + ras->ras_window_len - 1;
}
if (end != 0) {
+ unsigned long tmp_end;
+ /*
+ * Align RA window to an optimal boundary.
+ *
+ * XXX This would be better to align to cl_max_pages_per_rpc
+ * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
+ * be aligned to the RAID stripe size in the future and that
+ * is more important than the RPC size.
+ */
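+ /*
+ * Editorial example (assuming 4 KiB pages, so PTLRPC_MAX_BRW_PAGES
+ * is 256): end == 1000 gives tmp_end = (1001 & ~255) - 1 == 767,
+ * i.e. the window is trimmed back to the last full-RPC boundary;
+ * the "tmp_end > start" check keeps the window from collapsing
+ * when it is smaller than one RPC.
+ */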
+ tmp_end = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1))) - 1;
+ if (tmp_end > start)
+ end = tmp_end;
+
/* Truncate RA window to end of file */
end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
}
ria->ria_length = ras->ras_stride_length;
ria->ria_pages = ras->ras_stride_pages;
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
ras->ras_window_len)) {
ras->ras_next_readahead = ra_end;
RAS_CDEBUG(ras);
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
}
RETURN(ret);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- spin_lock_init(&ras->ras_lock);
+ cfs_spin_lock_init(&ras->ras_lock);
ras_reset(ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
int zero = 0, stride_detect = 0, ra_miss = 0;
ENTRY;
- spin_lock(&sbi->ll_lock);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
ras->ras_consecutive_pages++;
ras->ras_last_readpage = index;
ras_set_start(ras, index);
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
+
+ if (stride_io_mode(ras))
+ /* Since stride readahead is sensitive to the offset
+ * of the read-ahead, use the original offset here
+ * instead of ras_window_start, which is 1M aligned. */
+ ras->ras_next_readahead = max(index,
+ ras->ras_next_readahead);
+ else
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
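+ /*
+ * Editorial illustration: if the 1M-aligned window start is page
+ * 256 but the stride read actually happened at page 260, resuming
+ * readahead at 260 keeps the stride offset arithmetic in phase
+ * with the detected pattern.
+ */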
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
out_unlock:
RAS_CDEBUG(ras);
ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&ras->ras_lock);
return;
}
}
ll_cl_fini(lcc);
} else {
+ unlock_page(vmpage);
result = PTR_ERR(lcc);
}
RETURN(result);