/*
 * unix_io.c --- This is the Unix (well, really POSIX) implementation
 *	of the I/O manager.
 *
 * Implements a one-block write-through cache.
 *
 * Includes support for Windows NT support under Cygwin.
 *
 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
 *	2002 by Theodore Ts'o.
 *
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 */
18 #if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
19 #define _XOPEN_SOURCE 600
20 #define _DARWIN_C_SOURCE
21 #define _FILE_OFFSET_BITS 64
22 #ifndef _LARGEFILE_SOURCE
23 #define _LARGEFILE_SOURCE
25 #ifndef _LARGEFILE64_SOURCE
26 #define _LARGEFILE64_SOURCE
45 #include <sys/utsname.h>
48 #include <sys/types.h>
50 #ifdef HAVE_SYS_IOCTL_H
51 #include <sys/ioctl.h>
53 #ifdef HAVE_SYS_MOUNT_H
54 #include <sys/mount.h>
56 #ifdef HAVE_SYS_PRCTL_H
57 #include <sys/prctl.h>
59 #define PR_GET_DUMPABLE 3
64 #if HAVE_SYS_RESOURCE_H
65 #include <sys/resource.h>
67 #if HAVE_LINUX_FALLOC_H
68 #include <linux/falloc.h>
74 #if defined(__linux__) && defined(_IO) && !defined(BLKROGET)
75 #define BLKROGET _IO(0x12, 94) /* Get read-only status (0 = read_write). */
85 * For checking structure magic numbers...
88 #define EXT2_CHECK_MAGIC(struct, code) \
89 if ((struct)->magic != (code)) return (code)
93 unsigned long long block;
101 #define WRITE_DIRECT_SIZE 4 /* Must be smaller than CACHE_SIZE */
102 #define READ_DIRECT_SIZE 4 /* Should be smaller than CACHE_SIZE */
104 struct unix_private_data {
111 struct unix_cache cache[CACHE_SIZE];
113 struct struct_io_stats io_stats;
115 pthread_mutex_t cache_mutex;
116 pthread_mutex_t bounce_mutex;
117 pthread_mutex_t stats_mutex;
121 #define IS_ALIGNED(n, align) ((((uintptr_t) n) & \
122 ((uintptr_t) ((align)-1))) == 0)
124 typedef enum lock_kind {
125 CACHE_MTX, BOUNCE_MTX, STATS_MTX
129 static inline pthread_mutex_t *get_mutex(struct unix_private_data *data,
132 if (data->flags & IO_FLAG_THREADS) {
135 return &data->cache_mutex;
137 return &data->bounce_mutex;
139 return &data->stats_mutex;
146 static inline void mutex_lock(struct unix_private_data *data, kind_t kind)
149 pthread_mutex_t *mtx = get_mutex(data,kind);
152 pthread_mutex_lock(mtx);
156 static inline void mutex_unlock(struct unix_private_data *data, kind_t kind)
159 pthread_mutex_t *mtx = get_mutex(data,kind);
162 pthread_mutex_unlock(mtx);
166 static errcode_t unix_get_stats(io_channel channel, io_stats *stats)
168 errcode_t retval = 0;
170 struct unix_private_data *data;
172 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
173 data = (struct unix_private_data *) channel->private_data;
174 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
177 mutex_lock(data, STATS_MTX);
178 *stats = &data->io_stats;
179 mutex_unlock(data, STATS_MTX);
/*
 * getenv() wrapper that refuses to read the environment when the
 * process may be running with elevated or otherwise non-matching
 * credentials (setuid/setgid, or not dumpable), to avoid acting on
 * attacker-controlled environment variables.
 */
static char *safe_getenv(const char *arg)
{
	/* Don't trust the environment if real and effective ids differ */
	if ((getuid() != geteuid()) || (getgid() != getegid()))
		return NULL;
#ifdef HAVE_PRCTL
	if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#else
#if (defined(linux) && defined(SYS_prctl))
	if (syscall(SYS_prctl, PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#endif
#endif

#if defined(HAVE_SECURE_GETENV)
	return secure_getenv(arg);
#elif defined(HAVE___SECURE_GETENV)
	return __secure_getenv(arg);
#else
	return getenv(arg);
#endif
}
209 * Here are the raw I/O functions
/*
 * Read 'count' blocks (or, when count < 0, -count bytes) from the device
 * into bufv, falling back to a bounce buffer when the caller's buffer,
 * file offset, or size does not satisfy the channel's O_DIRECT alignment.
 *
 * NOTE(review): this chunk is a sampled extraction -- interior lines
 * (some locals, #ifdef arms, labels and closing braces) are missing.
 * Comments below describe only what the visible lines establish.
 */
211 static errcode_t raw_read_blk(io_channel channel,
212 struct unix_private_data *data,
213 unsigned long long block,
214 int count, void *bufv)
218 ext2_loff_t location;
220 unsigned char *buf = bufv;
221 ssize_t really_read = 0;
222 unsigned long long aligned_blk;
223 int align_size, offset;
/* count < 0 means "read -count bytes"; otherwise count is in blocks */
225 size = (count < 0) ? -count : (ext2_loff_t) count * channel->block_size;
/* byte counters are protected by their own mutex */
226 mutex_lock(data, STATS_MTX);
227 data->io_stats.bytes_read += size;
228 mutex_unlock(data, STATS_MTX);
/* data->offset is the byte offset of the filesystem within the device */
229 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
/* test hook: force the slow bounce-buffer path */
231 if (data->flags & IO_FLAG_FORCE_BOUNCE)
/* Fast path: everything satisfies the alignment, direct pread64() OK */
235 /* Try an aligned pread */
236 if ((channel->align == 0) ||
237 (IS_ALIGNED(buf, channel->align) &&
238 IS_ALIGNED(location, channel->align) &&
239 IS_ALIGNED(size, channel->align))) {
240 actual = pread64(data->dev, buf, size, location);
/* Same test for plain pread(); only usable when off_t is wide enough
 * to hold an ext2_loff_t */
246 /* Try an aligned pread */
247 if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
248 ((channel->align == 0) ||
249 (IS_ALIGNED(buf, channel->align) &&
250 IS_ALIGNED(location, channel->align) &&
251 IS_ALIGNED(size, channel->align)))) {
252 actual = pread(data->dev, buf, size, location);
257 #endif /* HAVE_PREAD */
/* No pread available: seek + read under BOUNCE_MTX so the shared file
 * position is not raced by another thread */
259 if ((channel->align == 0) ||
260 (IS_ALIGNED(buf, channel->align) &&
261 IS_ALIGNED(location, channel->align) &&
262 IS_ALIGNED(size, channel->align))) {
263 mutex_lock(data, BOUNCE_MTX);
264 if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
265 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
268 actual = read(data->dev, buf, size);
269 if (actual != size) {
275 retval = EXT2_ET_SHORT_READ;
282 printf("raw_read_blk: O_DIRECT fallback: %p %lu\n", buf,
283 (unsigned long) size);
287 * The buffer or size which we're trying to read isn't aligned
288 * to the O_DIRECT rules, so we need to do this the hard way...
/* Pick the chunk unit: the block size when it is a multiple of the
 * required alignment, otherwise the raw alignment itself */
291 if (channel->align == 0)
293 if ((channel->block_size > channel->align) &&
294 (channel->block_size % channel->align) == 0)
295 align_size = channel->block_size;
297 align_size = channel->align;
298 aligned_blk = location / align_size;
299 offset = location % align_size;
/* Bounce loop: read one aligned chunk at a time into data->bounce and
 * memcpy the wanted piece into the caller's buffer */
301 mutex_lock(data, BOUNCE_MTX);
302 if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
303 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
307 actual = read(data->dev, data->bounce, align_size);
308 if (actual != align_size) {
309 mutex_unlock(data, BOUNCE_MTX);
/* report only what actually reached the caller's buffer */
310 actual = really_read;
/* clamp the copy so it never runs past the end of the bounce chunk */
315 if ((actual + offset) > align_size)
316 actual = align_size - offset;
319 memcpy(buf, (char *)data->bounce + offset, actual);
321 really_read += actual;
328 mutex_unlock(data, BOUNCE_MTX);
/* Error path: zero-fill whatever was not read, then give the channel's
 * registered read_error handler (if any) the final say on the status */
332 mutex_unlock(data, BOUNCE_MTX);
333 if (actual >= 0 && actual < size)
334 memset((char *) buf+actual, 0, size-actual);
335 if (channel->read_error)
336 retval = (channel->read_error)(channel, block, count, buf,
337 size, actual, retval);
341 #define RAW_WRITE_NO_HANDLER 1
/*
 * Write 'count' blocks (count < 0 selects a single-block special case)
 * from bufv to the device, using a read-modify-write bounce buffer when
 * the caller's buffer, offset, or size violates O_DIRECT alignment.
 * The RAW_WRITE_NO_HANDLER flag suppresses the channel's write_error
 * callback (used when flushing dirty cache entries on behalf of reuse).
 *
 * NOTE(review): this chunk is a sampled extraction -- interior lines
 * (some locals, branches, labels and closing braces) are missing.
 * Comments below describe only what the visible lines establish.
 */
343 static errcode_t raw_write_blk(io_channel channel,
344 struct unix_private_data *data,
345 unsigned long long block,
346 int count, const void *bufv,
350 ext2_loff_t location;
353 const unsigned char *buf = bufv;
354 unsigned long long aligned_blk;
355 int align_size, offset;
/* negative count: single block; otherwise count is in blocks */
358 size = channel->block_size;
363 size = (ext2_loff_t) count * channel->block_size;
365 mutex_lock(data, STATS_MTX);
366 data->io_stats.bytes_written += size;
367 mutex_unlock(data, STATS_MTX);
/* data->offset is the byte offset of the filesystem within the device */
369 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
/* test hook: force the slow bounce-buffer path */
371 if (data->flags & IO_FLAG_FORCE_BOUNCE)
/* Fast path: alignment satisfied, direct pwrite64() is allowed */
375 /* Try an aligned pwrite */
376 if ((channel->align == 0) ||
377 (IS_ALIGNED(buf, channel->align) &&
378 IS_ALIGNED(location, channel->align) &&
379 IS_ALIGNED(size, channel->align))) {
380 actual = pwrite64(data->dev, buf, size, location);
/* plain pwrite(): only when off_t can hold an ext2_loff_t */
385 /* Try an aligned pwrite */
386 if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
387 ((channel->align == 0) ||
388 (IS_ALIGNED(buf, channel->align) &&
389 IS_ALIGNED(location, channel->align) &&
390 IS_ALIGNED(size, channel->align)))) {
391 actual = pwrite(data->dev, buf, size, location);
395 #endif /* HAVE_PWRITE */
/* No pwrite: seek + write under BOUNCE_MTX to protect file position */
397 if ((channel->align == 0) ||
398 (IS_ALIGNED(buf, channel->align) &&
399 IS_ALIGNED(location, channel->align) &&
400 IS_ALIGNED(size, channel->align))) {
401 mutex_lock(data, BOUNCE_MTX);
402 if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
403 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
406 actual = write(data->dev, buf, size);
407 mutex_unlock(data, BOUNCE_MTX);
412 if (actual != size) {
414 retval = EXT2_ET_SHORT_WRITE;
421 printf("raw_write_blk: O_DIRECT fallback: %p %lu\n", buf,
422 (unsigned long) size);
425 * The buffer or size which we're trying to write isn't aligned
426 * to the O_DIRECT rules, so we need to do this the hard way...
/* Pick the chunk unit, same rule as raw_read_blk */
429 if (channel->align == 0)
431 if ((channel->block_size > channel->align) &&
432 (channel->block_size % channel->align) == 0)
433 align_size = channel->block_size;
435 align_size = channel->align;
436 aligned_blk = location / align_size;
437 offset = location % align_size;
/* Read-modify-write loop over aligned chunks */
442 mutex_lock(data, BOUNCE_MTX);
/* Partial chunk: must pre-read existing contents so the untouched
 * bytes of the aligned unit are preserved */
443 if (size < align_size || offset) {
444 if (ext2fs_llseek(data->dev, aligned_blk * align_size,
446 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
449 actual = read(data->dev, data->bounce,
451 if (actual != align_size) {
/* short read past EOF: zero-fill the tail of the bounce buffer */
456 memset((char *) data->bounce + actual, 0,
457 align_size - actual);
/* clamp the copy so it stays inside the aligned chunk */
461 if ((actual + offset) > align_size)
462 actual = align_size - offset;
465 memcpy(((char *)data->bounce) + offset, buf, actual);
466 if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
467 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
470 actual_w = write(data->dev, data->bounce, align_size);
471 mutex_unlock(data, BOUNCE_MTX);
476 if (actual_w != align_size)
487 mutex_unlock(data, BOUNCE_MTX);
/* Error path: invoke the channel's write_error handler unless the
 * caller explicitly suppressed it */
489 if (((flags & RAW_WRITE_NO_HANDLER) == 0) && channel->write_error)
490 retval = (channel->write_error)(channel, block, count, buf,
491 size, actual, retval);
497 * Here we implement the cache functions
500 /* Allocate the cache buffers */
501 static errcode_t alloc_cache(io_channel channel,
502 struct unix_private_data *data)
505 struct unix_cache *cache;
508 data->access_time = 0;
509 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
511 cache->access_time = 0;
515 ext2fs_free_mem(&cache->buf);
516 retval = io_channel_alloc_buf(channel, 0, &cache->buf);
520 if (channel->align || data->flags & IO_FLAG_FORCE_BOUNCE) {
522 ext2fs_free_mem(&data->bounce);
523 retval = io_channel_alloc_buf(channel, 0, &data->bounce);
528 /* Free the cache buffers */
529 static void free_cache(struct unix_private_data *data)
531 struct unix_cache *cache;
534 data->access_time = 0;
535 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
537 cache->access_time = 0;
541 ext2fs_free_mem(&cache->buf);
544 ext2fs_free_mem(&data->bounce);
549 * Try to find a block in the cache. If the block is not found, and
550 * eldest is a non-zero pointer, then fill in eldest with the cache
551 * entry to that should be reused.
553 static struct unix_cache *find_cached_block(struct unix_private_data *data,
554 unsigned long long block,
555 struct unix_cache **eldest)
557 struct unix_cache *cache, *unused_cache, *oldest_cache;
560 unused_cache = oldest_cache = 0;
561 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
562 if (!cache->in_use) {
564 unused_cache = cache;
567 if (cache->block == block) {
568 cache->access_time = ++data->access_time;
572 (cache->access_time < oldest_cache->access_time))
573 oldest_cache = cache;
576 *eldest = (unused_cache) ? unused_cache : oldest_cache;
581 * Reuse a particular cache entry for another block.
583 static errcode_t reuse_cache(io_channel channel,
584 struct unix_private_data *data, struct unix_cache *cache,
585 unsigned long long block)
587 if (cache->dirty && cache->in_use) {
590 retval = raw_write_blk(channel, data, cache->block, 1,
591 cache->buf, RAW_WRITE_NO_HANDLER);
593 cache->write_err = 1;
600 cache->write_err = 0;
601 cache->block = block;
602 cache->access_time = ++data->access_time;
606 #define FLUSH_INVALIDATE 0x01
607 #define FLUSH_NOLOCK 0x02
610 * Flush all of the blocks in the cache
612 static errcode_t flush_cached_blocks(io_channel channel,
613 struct unix_private_data *data,
616 struct unix_cache *cache;
617 errcode_t retval, retval2;
621 if ((flags & FLUSH_NOLOCK) == 0)
622 mutex_lock(data, CACHE_MTX);
623 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
627 if (flags & FLUSH_INVALIDATE)
633 retval = raw_write_blk(channel, data,
634 cache->block, 1, cache->buf, 0);
640 if ((flags & FLUSH_NOLOCK) == 0)
641 mutex_unlock(data, CACHE_MTX);
644 #endif /* NO_IO_CACHE */
647 #ifndef BLKDISCARDZEROES
648 #define BLKDISCARDZEROES _IO(0x12,124)
/*
 * open()/open64() wrapper: picks the large-file variant where it exists
 * and is not merely a deprecated OS X alias, and drops the mode
 * argument when no creation flag is present.
 */
int ext2fs_open_file(const char *pathname, int flags, mode_t mode)
{
#if defined(HAVE_OPEN64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	if (mode)
		return open64(pathname, flags, mode);
	return open64(pathname, flags);
#else
	if (mode)
		return open(pathname, flags, mode);
	return open(pathname, flags);
#endif
}
666 int ext2fs_stat(const char *path, ext2fs_struct_stat *buf)
668 #if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
669 return stat64(path, buf);
671 return stat(path, buf);
675 int ext2fs_fstat(int fd, ext2fs_struct_stat *buf)
677 #if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
678 return fstat64(fd, buf);
680 return fstat(fd, buf);
/*
 * Common channel setup shared by unix_open() and unixfd_open(): allocate
 * and initialize the io_channel and its private data around an
 * already-open file descriptor, probe device properties (block device?
 * discard-zeroes? O_DIRECT alignment?), allocate the cache, and apply
 * assorted platform workarounds.
 *
 * NOTE(review): this chunk is a sampled extraction -- interior lines
 * (error-label cleanup jumps, some locals and #ifdef arms) are missing.
 * Comments below describe only what the visible lines establish.
 */
685 static errcode_t unix_open_channel(const char *name, int fd,
686 int flags, io_channel *channel,
689 io_channel io = NULL;
690 struct unix_private_data *data = NULL;
692 ext2fs_struct_stat st;
/* env var hook to force bounce buffering (ignored for setuid, see
 * safe_getenv) */
697 if (safe_getenv("UNIX_IO_FORCE_BOUNCE"))
698 flags |= IO_FLAG_FORCE_BOUNCE;
702 * We need to make sure any previous errors in the block
703 * device are thrown away, sigh.
708 retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
711 memset(io, 0, sizeof(struct struct_io_channel));
712 io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
713 retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
717 io->manager = io_mgr;
718 retval = ext2fs_get_mem(strlen(name)+1, &io->name);
722 strcpy(io->name, name);
723 io->private_data = data;
/* default block size until the caller calls set_blksize */
724 io->block_size = 1024;
730 memset(data, 0, sizeof(struct unix_private_data));
731 data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
732 data->io_stats.num_fields = 2;
/* O_DIRECT requires aligned buffers; query the needed alignment */
736 #if defined(O_DIRECT)
737 if (flags & IO_FLAG_DIRECT_IO)
738 io->align = ext2fs_get_dio_alignment(data->dev);
739 #elif defined(F_NOCACHE)
740 if (flags & IO_FLAG_DIRECT_IO)
745 * If the device is really a block device, then set the
746 * appropriate flag, otherwise we can set DISCARD_ZEROES flag
747 * because we are going to use punch hole instead of discard
748 * and if it succeed, subsequent read from sparse area returns
751 if (ext2fs_fstat(data->dev, &st) == 0) {
752 if (ext2fsP_is_disk_device(st.st_mode))
753 io->flags |= CHANNEL_FLAGS_BLOCK_DEVICE;
755 io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
/* on block devices, ask the kernel whether discard zeroes data */
758 #ifdef BLKDISCARDZEROES
761 if (ioctl(data->dev, BLKDISCARDZEROES, &zeroes) == 0 &&
763 io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
767 #if defined(__CYGWIN__)
769 * Some operating systems require that the buffers be aligned,
770 * regardless of O_DIRECT
/* FreeBSD: raw block devices require sector-aligned I/O even
 * without O_DIRECT, so raise io->align accordingly */
776 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
777 if (io->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
778 int dio_align = ext2fs_get_dio_alignment(fd);
780 if (io->align < dio_align)
781 io->align = dio_align;
/* cache allocation must happen after io->align is final */
785 if ((retval = alloc_cache(io, data)))
789 if (flags & IO_FLAG_RW) {
793 /* Is the block device actually writable? */
794 error = ioctl(data->dev, BLKROGET, &readonly);
795 if (!error && readonly) {
804 #if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
805 #define RLIM_INFINITY ((unsigned long)(~0UL>>1))
807 #define RLIM_INFINITY (~0UL)
810 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
811 * block devices are wrongly getting hit by the filesize
812 * limit. This workaround isn't perfect, since it won't work
813 * if glibc wasn't built against 2.2 header files. (Sigh.)
/* only applies when writing to a disk device on those kernels */
816 if ((flags & IO_FLAG_RW) &&
818 ((ut.release[0] == '2') && (ut.release[1] == '.') &&
819 (ut.release[2] == '4') && (ut.release[3] == '.') &&
820 (ut.release[4] == '1') && (ut.release[5] >= '0') &&
821 (ut.release[5] < '8')) &&
822 (ext2fs_fstat(data->dev, &st) == 0) &&
823 (ext2fsP_is_disk_device(st.st_mode))) {
/* lift RLIMIT_FSIZE to infinity (twice, in case the first
 * setrlimit was clamped) */
826 rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
827 setrlimit(RLIMIT_FSIZE, &rlim);
828 getrlimit(RLIMIT_FSIZE, &rlim);
829 if (((unsigned long) rlim.rlim_cur) <
830 ((unsigned long) rlim.rlim_max)) {
831 rlim.rlim_cur = rlim.rlim_max;
832 setrlimit(RLIMIT_FSIZE, &rlim);
/* threaded mode: create the three mutexes, unwinding the ones
 * already created if a later init fails */
837 if (flags & IO_FLAG_THREADS) {
838 io->flags |= CHANNEL_FLAGS_THREADS;
839 retval = pthread_mutex_init(&data->cache_mutex, NULL);
842 retval = pthread_mutex_init(&data->bounce_mutex, NULL);
844 pthread_mutex_destroy(&data->cache_mutex);
847 retval = pthread_mutex_init(&data->stats_mutex, NULL);
849 pthread_mutex_destroy(&data->cache_mutex);
850 pthread_mutex_destroy(&data->bounce_mutex);
/* error-path cleanup: release whatever was allocated */
863 ext2fs_free_mem(&data);
867 ext2fs_free_mem(&io->name);
869 ext2fs_free_mem(&io);
874 static errcode_t unixfd_open(const char *str_fd, int flags,
881 #if defined(HAVE_FCNTL)
882 fd_flags = fcntl(fd, F_GETFD);
887 if (fd_flags & O_RDWR)
889 if (fd_flags & O_EXCL)
890 flags |= IO_FLAG_EXCLUSIVE;
891 #if defined(O_DIRECT)
892 if (fd_flags & O_DIRECT)
893 flags |= IO_FLAG_DIRECT_IO;
895 #endif /* HAVE_FCNTL */
897 return unix_open_channel(str_fd, fd, flags, channel, unixfd_io_manager);
900 static errcode_t unix_open(const char *name, int flags,
907 return EXT2_ET_BAD_DEVICE_NAME;
909 open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
910 if (flags & IO_FLAG_EXCLUSIVE)
911 open_flags |= O_EXCL;
912 #if defined(O_DIRECT)
913 if (flags & IO_FLAG_DIRECT_IO)
914 open_flags |= O_DIRECT;
916 fd = ext2fs_open_file(name, open_flags, 0);
919 #if defined(F_NOCACHE) && !defined(IO_DIRECT)
920 if (flags & IO_FLAG_DIRECT_IO) {
921 if (fcntl(fd, F_NOCACHE, 1) < 0)
925 return unix_open_channel(name, fd, flags, channel, unix_io_manager);
928 static errcode_t unix_close(io_channel channel)
930 struct unix_private_data *data;
931 errcode_t retval = 0;
933 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
934 data = (struct unix_private_data *) channel->private_data;
935 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
937 if (--channel->refcount > 0)
941 retval = flush_cached_blocks(channel, data, 0);
944 if (close(data->dev) < 0)
948 if (data->flags & IO_FLAG_THREADS) {
949 pthread_mutex_destroy(&data->cache_mutex);
950 pthread_mutex_destroy(&data->bounce_mutex);
951 pthread_mutex_destroy(&data->stats_mutex);
955 ext2fs_free_mem(&channel->private_data);
957 ext2fs_free_mem(&channel->name);
958 ext2fs_free_mem(&channel);
962 static errcode_t unix_set_blksize(io_channel channel, int blksize)
964 struct unix_private_data *data;
965 errcode_t retval = 0;
967 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
968 data = (struct unix_private_data *) channel->private_data;
969 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
971 if (channel->block_size != blksize) {
972 mutex_lock(data, CACHE_MTX);
973 mutex_lock(data, BOUNCE_MTX);
975 if ((retval = flush_cached_blocks(channel, data, FLUSH_NOLOCK))){
976 mutex_unlock(data, BOUNCE_MTX);
977 mutex_unlock(data, CACHE_MTX);
982 channel->block_size = blksize;
984 retval = alloc_cache(channel, data);
985 mutex_unlock(data, BOUNCE_MTX);
986 mutex_unlock(data, CACHE_MTX);
/*
 * Cached multi-block read.  Serves hits from the one-block write-through
 * cache; misses are coalesced into a single raw read and then inserted
 * into the cache.  Odd-sized or very large requests bypass the cache.
 * A deferred write error recorded on a cache entry (cache->write_err)
 * is reported through the channel's write_error handler here.
 *
 * NOTE(review): sampled extraction -- some interior lines (locals,
 * labels, braces) are missing; comments describe the visible lines only.
 */
991 static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
992 int count, void *buf)
994 struct unix_private_data *data;
995 struct unix_cache *cache;
1000 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1001 data = (struct unix_private_data *) channel->private_data;
1002 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
/* cache disabled at build time or per-channel: read straight through */
1005 return raw_read_blk(channel, data, block, count, buf);
1007 if (data->flags & IO_FLAG_NOCACHE)
1008 return raw_read_blk(channel, data, block, count, buf);
1010 * If we're doing an odd-sized read or a very large read,
1011 * flush out the cache and then do a direct read.
1013 if (count < 0 || count > WRITE_DIRECT_SIZE) {
1014 if ((retval = flush_cached_blocks(channel, data, 0)))
1016 return raw_read_blk(channel, data, block, count, buf);
1020 mutex_lock(data, CACHE_MTX);
1022 /* If it's in the cache, use it! */
1023 if ((cache = find_cached_block(data, block, NULL))) {
1025 printf("Using cached block %lu\n", block);
1027 memcpy(cp, cache->buf, channel->block_size);
1030 cp += channel->block_size;
1035 * Find the number of uncached blocks so we can do a
1036 * single read request
1038 for (i=1; i < count; i++)
1039 if (find_cached_block(data, block+i, NULL))
1042 printf("Reading %d blocks starting at %lu\n", i, block);
/* drop CACHE_MTX around the raw read so cache users aren't blocked
 * on device I/O */
1044 mutex_unlock(data, CACHE_MTX);
1045 if ((retval = raw_read_blk(channel, data, block, i, cp)))
1047 mutex_lock(data, CACHE_MTX);
1049 /* Save the results in the cache */
1050 for (j=0; j < i; j++) {
1051 if (!find_cached_block(data, block, &cache)) {
/* reuse_cache may have to write back a dirty victim; a failure
 * there is routed to the write-error handler below */
1052 retval = reuse_cache(channel, data,
1055 goto call_write_handler;
1056 memcpy(cache->buf, cp, channel->block_size);
1060 cp += channel->block_size;
1063 mutex_unlock(data, CACHE_MTX);
/* call_write_handler: surface a deferred write-back failure.  The
 * failed block's contents are copied out first so the handler can be
 * invoked without holding CACHE_MTX */
1067 if (cache->write_err && channel->write_error) {
1068 char *err_buf = NULL;
1069 unsigned long long err_block = cache->block;
1073 cache->write_err = 0;
1074 if (io_channel_alloc_buf(channel, 0, &err_buf))
1077 memcpy(err_buf, cache->buf, channel->block_size);
1078 mutex_unlock(data, CACHE_MTX);
1079 (channel->write_error)(channel, err_block, 1, err_buf,
1080 channel->block_size, -1,
1083 ext2fs_free_mem(&err_buf);
1085 mutex_unlock(data, CACHE_MTX);
1087 #endif /* NO_IO_CACHE */
1090 static errcode_t unix_read_blk(io_channel channel, unsigned long block,
1091 int count, void *buf)
1093 return unix_read_blk64(channel, block, count, buf);
/*
 * Cached multi-block write.  Odd-sized or very large writes bypass and
 * invalidate the cache; moderate writes are written through (when the
 * channel is in write-through mode) and then mirrored into the cache.
 * A deferred write-back failure on an evicted entry is reported through
 * the channel's write_error handler.
 *
 * NOTE(review): sampled extraction -- some interior lines (locals,
 * labels, braces) are missing; comments describe the visible lines only.
 */
1096 static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
1097 int count, const void *buf)
1099 struct unix_private_data *data;
1100 struct unix_cache *cache, *reuse;
1101 errcode_t retval = 0;
1105 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1106 data = (struct unix_private_data *) channel->private_data;
1107 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
/* cache disabled at build time or per-channel: write straight through */
1110 return raw_write_blk(channel, data, block, count, buf, 0);
1112 if (data->flags & IO_FLAG_NOCACHE)
1113 return raw_write_blk(channel, data, block, count, buf, 0);
1115 * If we're doing an odd-sized write or a very large write,
1116 * flush out the cache completely and then do a direct write.
1118 if (count < 0 || count > WRITE_DIRECT_SIZE) {
1119 if ((retval = flush_cached_blocks(channel, data,
1122 return raw_write_blk(channel, data, block, count, buf, 0);
1126 * For a moderate-sized multi-block write, first force a write
1127 * if we're in write-through cache mode, and then fill the
1128 * cache with the blocks.
1130 writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
1132 retval = raw_write_blk(channel, data, block, count, buf, 0);
1135 mutex_lock(data, CACHE_MTX);
1137 cache = find_cached_block(data, block, &reuse);
/* miss: evict/claim the entry chosen by find_cached_block; a dirty
 * victim is written back inside reuse_cache */
1142 err = reuse_cache(channel, data, cache, block);
1144 goto call_write_handler;
/* guard against self-copy when the caller handed us the cache buf */
1146 if (cache->buf != cp)
1147 memcpy(cache->buf, cp, channel->block_size);
/* in write-through mode the data already hit the disk above */
1148 cache->dirty = !writethrough;
1151 cp += channel->block_size;
1153 mutex_unlock(data, CACHE_MTX);
/* call_write_handler: surface a deferred write-back failure; copy the
 * block out so the handler runs without CACHE_MTX held */
1157 if (cache->write_err && channel->write_error) {
1158 char *err_buf = NULL;
1159 unsigned long long err_block = cache->block;
1163 cache->write_err = 0;
1164 if (io_channel_alloc_buf(channel, 0, &err_buf))
1167 memcpy(err_buf, cache->buf, channel->block_size);
1168 mutex_unlock(data, CACHE_MTX);
1169 (channel->write_error)(channel, err_block, 1, err_buf,
1170 channel->block_size, -1,
1173 ext2fs_free_mem(&err_buf);
1175 mutex_unlock(data, CACHE_MTX);
1177 #endif /* NO_IO_CACHE */
1180 static errcode_t unix_cache_readahead(io_channel channel,
1181 unsigned long long block,
1182 unsigned long long count)
1184 #ifdef POSIX_FADV_WILLNEED
1185 struct unix_private_data *data;
1187 data = (struct unix_private_data *)channel->private_data;
1188 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1189 return posix_fadvise(data->dev,
1190 (ext2_loff_t)block * channel->block_size + data->offset,
1191 (ext2_loff_t)count * channel->block_size,
1192 POSIX_FADV_WILLNEED);
1194 return EXT2_ET_OP_NOT_SUPPORTED;
1198 static errcode_t unix_write_blk(io_channel channel, unsigned long block,
1199 int count, const void *buf)
1201 return unix_write_blk64(channel, block, count, buf);
1204 static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
1205 int size, const void *buf)
1207 struct unix_private_data *data;
1208 errcode_t retval = 0;
1211 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1212 data = (struct unix_private_data *) channel->private_data;
1213 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1215 if (channel->align != 0) {
1217 printf("unix_write_byte: O_DIRECT fallback\n");
1219 return EXT2_ET_UNIMPLEMENTED;
1224 * Flush out the cache completely
1226 if ((retval = flush_cached_blocks(channel, data, FLUSH_INVALIDATE)))
1230 if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
1233 actual = write(data->dev, buf, size);
1237 return EXT2_ET_SHORT_WRITE;
1243 * Flush data buffers to disk.
1245 static errcode_t unix_flush(io_channel channel)
1247 struct unix_private_data *data;
1248 errcode_t retval = 0;
1250 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1251 data = (struct unix_private_data *) channel->private_data;
1252 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1255 retval = flush_cached_blocks(channel, data, 0);
1258 if (!retval && fsync(data->dev) != 0)
1264 static errcode_t unix_set_option(io_channel channel, const char *option,
1267 struct unix_private_data *data;
1268 unsigned long long tmp;
1272 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1273 data = (struct unix_private_data *) channel->private_data;
1274 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1276 if (!strcmp(option, "offset")) {
1278 return EXT2_ET_INVALID_ARGUMENT;
1280 tmp = strtoull(arg, &end, 0);
1282 return EXT2_ET_INVALID_ARGUMENT;
1284 if (data->offset < 0)
1285 return EXT2_ET_INVALID_ARGUMENT;
1288 if (!strcmp(option, "cache")) {
1290 return EXT2_ET_INVALID_ARGUMENT;
1291 if (!strcmp(arg, "on")) {
1292 data->flags &= ~IO_FLAG_NOCACHE;
1295 if (!strcmp(arg, "off")) {
1296 retval = flush_cached_blocks(channel, data, 0);
1297 data->flags |= IO_FLAG_NOCACHE;
1300 return EXT2_ET_INVALID_ARGUMENT;
1302 return EXT2_ET_INVALID_ARGUMENT;
1305 #if defined(__linux__) && !defined(BLKDISCARD)
1306 #define BLKDISCARD _IO(0x12,119)
1309 static errcode_t unix_discard(io_channel channel, unsigned long long block,
1310 unsigned long long count)
1312 struct unix_private_data *data;
1315 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1316 data = (struct unix_private_data *) channel->private_data;
1317 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1319 if (channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
1323 range[0] = (__u64)(block) * channel->block_size + data->offset;
1324 range[1] = (__u64)(count) * channel->block_size;
1326 ret = ioctl(data->dev, BLKDISCARD, &range);
1331 #if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE)
1333 * If we are not on block device, try to use punch hole
1334 * to reclaim free space.
1336 ret = fallocate(data->dev,
1337 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1338 (off_t)(block) * channel->block_size + data->offset,
1339 (off_t)(count) * channel->block_size);
1345 if (errno == EOPNOTSUPP)
1351 return EXT2_ET_UNIMPLEMENTED;
1355 * If we know about ZERO_RANGE, try that before we try PUNCH_HOLE because
1356 * ZERO_RANGE doesn't unmap preallocated blocks. We prefer fallocate because
1357 * it always invalidates page cache, and libext2fs requires that reads after
1358 * ZERO_RANGE return zeroes.
/*
 * Zero a byte range of fd using fallocate: try FALLOC_FL_ZERO_RANGE
 * first, then fall back to punch-hole (which also reads back as
 * zeroes).  Returns 0 on success; on failure returns -1 with errno
 * set (EOPNOTSUPP when no mechanism is available).
 */
static int __unix_zeroout(int fd, off_t offset, off_t len)
{
	int ret = -1;

#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_ZERO_RANGE)
	ret = fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
	if (ret == 0)
		return 0;
#endif
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset, len);
	if (ret == 0)
		return 0;
#endif
	errno = EOPNOTSUPP;
	return ret;
}
1379 /* parameters might not be used if OS doesn't support zeroout */
1380 #if __GNUC_PREREQ (4, 6)
1381 #pragma GCC diagnostic push
1382 #pragma GCC diagnostic ignored "-Wunused-parameter"
1384 static errcode_t unix_zeroout(io_channel channel, unsigned long long block,
1385 unsigned long long count)
1387 struct unix_private_data *data;
1390 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1391 data = (struct unix_private_data *) channel->private_data;
1392 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1394 if (safe_getenv("UNIX_IO_NOZEROOUT"))
1397 if (!(channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE)) {
1398 /* Regular file, try to use truncate/punch/zero. */
1399 struct stat statbuf;
1404 * If we're trying to zero a range past the end of the file,
1405 * extend the file size, then truncate everything.
1407 ret = fstat(data->dev, &statbuf);
1410 if ((unsigned long long) statbuf.st_size <
1411 (block + count) * channel->block_size + data->offset) {
1412 ret = ftruncate(data->dev,
1413 (block + count) * channel->block_size + data->offset);
1419 ret = __unix_zeroout(data->dev,
1420 (off_t)(block) * channel->block_size + data->offset,
1421 (off_t)(count) * channel->block_size);
1424 if (errno == EOPNOTSUPP)
1430 return EXT2_ET_UNIMPLEMENTED;
1432 #if __GNUC_PREREQ (4, 6)
1433 #pragma GCC diagnostic pop
1436 static struct struct_io_manager struct_unix_manager = {
1437 .magic = EXT2_ET_MAGIC_IO_MANAGER,
1438 .name = "Unix I/O Manager",
1440 .close = unix_close,
1441 .set_blksize = unix_set_blksize,
1442 .read_blk = unix_read_blk,
1443 .write_blk = unix_write_blk,
1444 .flush = unix_flush,
1445 .write_byte = unix_write_byte,
1446 .set_option = unix_set_option,
1447 .get_stats = unix_get_stats,
1448 .read_blk64 = unix_read_blk64,
1449 .write_blk64 = unix_write_blk64,
1450 .discard = unix_discard,
1451 .cache_readahead = unix_cache_readahead,
1452 .zeroout = unix_zeroout,
1455 io_manager unix_io_manager = &struct_unix_manager;
1457 static struct struct_io_manager struct_unixfd_manager = {
1458 .magic = EXT2_ET_MAGIC_IO_MANAGER,
1459 .name = "Unix fd I/O Manager",
1460 .open = unixfd_open,
1461 .close = unix_close,
1462 .set_blksize = unix_set_blksize,
1463 .read_blk = unix_read_blk,
1464 .write_blk = unix_write_blk,
1465 .flush = unix_flush,
1466 .write_byte = unix_write_byte,
1467 .set_option = unix_set_option,
1468 .get_stats = unix_get_stats,
1469 .read_blk64 = unix_read_blk64,
1470 .write_blk64 = unix_write_blk64,
1471 .discard = unix_discard,
1472 .cache_readahead = unix_cache_readahead,
1473 .zeroout = unix_zeroout,
1476 io_manager unixfd_io_manager = &struct_unixfd_manager;