The bug occurs when the ldiskfs maximum file size limit (16TB - 4KB) is
exceeded during a write. Add checks for the maximum file size on both the
client and server sides.
Xyratex-bug-id: MRP-2131
Change-Id: I73f0ee803670ada869c2618f275049948668848e
Signed-off-by: Andriy Skulysh <andriy.skulysh@seagate.com>
Reviewed-on: http://review.whamcloud.com/12600
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
struct file *file, enum cl_io_type iot,
loff_t *ppos, size_t count)
{
- struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ loff_t end;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
ssize_t result;
io = ccc_env_thread_io(env);
ll_io_init(io, file, iot == CIT_WRITE);
+ /* The maximum Lustre file size is variable, based on the
+ * OST maximum object size and number of stripes. This
+ * needs another check in addition to the VFS checks earlier. */
+ end = (io->u.ci_wr.wr_append ? i_size_read(inode) : *ppos) + count;
+ if (end > ll_file_maxbytes(inode)) {
+ result = -EFBIG;
+ CDEBUG(D_INODE, "%s: file "DFID" offset %llu > maxbytes "LPU64
+ ": rc = %zd\n", ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), end, ll_file_maxbytes(inode),
+ result);
+ RETURN(result);
+ }
+
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
bool range_locked = false;
unsigned long *blocks, int create)
{
int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ pgoff_t bitmap_max_page_index;
unsigned long *b;
int rc = 0, i;
+ bitmap_max_page_index = LDISKFS_SB(inode->i_sb)->s_bitmap_maxbytes >>
+ PAGE_SHIFT;
for (i = 0, b = blocks; i < pages; i++, page++) {
+ if ((*page)->index + 1 >= bitmap_max_page_index) {
+ rc = -EFBIG;
+ break;
+ }
rc = ldiskfs_map_inode_page(inode, *page, b, create);
if (rc) {
CERROR("ino %lu, blk %lu create %d: rc %d\n",
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
+ pgoff_t extent_max_page_index;
+
+ extent_max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
continue;
}
+ if (fp->index + i >= extent_max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
+
/* process found extent */
rc = osd_ldiskfs_map_nblocks(inode, fp->index * blocks_per_page,
clen * blocks_per_page, blocks,
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
+ pgoff_t max_page_index;
+
+ max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
inode->i_ino, pages, (*page)->index);
if (++i != pages)
continue;
}
+ if (fp->index + i >= max_page_index)
+ GOTO(cleanup, rc = -EFBIG);
/* process found extent */
map.m_lblk = fp->index * blocks_per_page;
map.m_len = blen = clen * blocks_per_page;
}
run_test 243 "various group lock tests"
+test_250() {
+ [ "$(facet_fstype ost$(($($GETSTRIPE -i $DIR/$tfile) + 1)))" = "zfs" ] \
+ && skip "no 16TB file size limit on ZFS" && return
+
+ $SETSTRIPE -c 1 $DIR/$tfile
+ # ldiskfs extent file size limit is (16TB - 4KB - 1) bytes
+ local size=$((16 * 1024 * 1024 * 1024 * 1024 - 4096 - 1))
+ $TRUNCATE $DIR/$tfile $size || error "truncate $tfile to $size failed"
+ dd if=/dev/zero of=$DIR/$tfile bs=10 count=1 oflag=append \
+ conv=notrunc,fsync && error "append succeeded"
+ return 0
+}
+run_test 250 "Write above 16T limit"
+
cleanup_test_300() {
trap 0
umask $SAVE_UMASK