/*
 * unix_io.c --- This is the Unix (well, really POSIX) implementation
 *	of the I/O manager.
 *
 * Implements a one-block write-through cache.
 *
 * Includes support for Windows NT under Cygwin.
 *
 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
 *	2002 by Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#define _XOPEN_SOURCE 600
#define _DARWIN_C_SOURCE
#define _FILE_OFFSET_BITS 64
#ifndef _LARGEFILE_SOURCE
#define _LARGEFILE_SOURCE
#endif
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#endif

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if HAVE_ERRNO_H
#include <errno.h>
#endif
#include <fcntl.h>
#include <time.h>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#else
#define PR_GET_DUMPABLE 3
#endif
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if HAVE_LINUX_FALLOC_H
#include <linux/falloc.h>
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif

#if defined(__linux__) && defined(_IO) && !defined(BLKROGET)
#define BLKROGET   _IO(0x12, 94) /* Get read-only status (0 = read_write).  */
#endif

#undef ALIGN_DEBUG

#include "ext2_fs.h"
#include "ext2fs.h"
#include "ext2fsP.h"

/*
 * For checking structure magic numbers...
 */

#define EXT2_CHECK_MAGIC(struct, code) \
	  if ((struct)->magic != (code)) return (code)

struct unix_cache {
	char			*buf;
	unsigned long long	block;
	int			access_time;
	unsigned		dirty:1;
	unsigned		in_use:1;
};

#define CACHE_SIZE 8
#define WRITE_DIRECT_SIZE 4	/* Must be smaller than CACHE_SIZE */
#define READ_DIRECT_SIZE 4	/* Should be smaller than CACHE_SIZE */

struct unix_private_data {
	int	magic;
	int	dev;
	int	flags;
	int	align;
	int	access_time;
	ext2_loff_t offset;
	struct unix_cache cache[CACHE_SIZE];
	void	*bounce;
	struct struct_io_stats io_stats;
#ifdef HAVE_PTHREAD
	pthread_mutex_t cache_mutex;
	pthread_mutex_t bounce_mutex;
	pthread_mutex_t stats_mutex;
#endif
};

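/*
 * Test whether a pointer, offset, or size is a multiple of "align".
 * The bit trick assumes "align" is a power of two, which holds for
 * the O_DIRECT alignments this file deals with.
 */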
#define IS_ALIGNED(n, align) ((((uintptr_t) n) & \
			       ((uintptr_t) ((align)-1))) == 0)

typedef enum lock_kind {
	CACHE_MTX, BOUNCE_MTX, STATS_MTX
} kind_t;

#ifdef HAVE_PTHREAD
static inline pthread_mutex_t *get_mutex(struct unix_private_data *data,
					 kind_t kind)
{
	if (data->flags & IO_FLAG_THREADS) {
		switch (kind) {
		case CACHE_MTX:
			return &data->cache_mutex;
		case BOUNCE_MTX:
			return &data->bounce_mutex;
		case STATS_MTX:
			return &data->stats_mutex;
		}
	}
	return NULL;
}
#endif

static inline void mutex_lock(struct unix_private_data *data, kind_t kind)
{
#ifdef HAVE_PTHREAD
	pthread_mutex_t *mtx = get_mutex(data,kind);

	if (mtx)
		pthread_mutex_lock(mtx);
#endif
}

static inline void mutex_unlock(struct unix_private_data *data, kind_t kind)
{
#ifdef HAVE_PTHREAD
	pthread_mutex_t *mtx = get_mutex(data,kind);

	if (mtx)
		pthread_mutex_unlock(mtx);
#endif
}

static errcode_t unix_get_stats(io_channel channel, io_stats *stats)
{
	errcode_t	retval = 0;

	struct unix_private_data *data;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (stats) {
		mutex_lock(data, STATS_MTX);
		*stats = &data->io_stats;
		mutex_unlock(data, STATS_MTX);
	}

	return retval;
}

static char *safe_getenv(const char *arg)
{
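	/*
	 * Refuse environment overrides when running setuid/setgid or
	 * otherwise non-dumpable, so the environment cannot be used to
	 * influence privileged invocations.
	 */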
	if ((getuid() != geteuid()) || (getgid() != getegid()))
		return NULL;
#ifdef HAVE_PRCTL
	if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#else
#if (defined(linux) && defined(SYS_prctl))
	if (syscall(SYS_prctl, PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#endif
#endif

#if defined(HAVE_SECURE_GETENV)
	return secure_getenv(arg);
#elif defined(HAVE___SECURE_GETENV)
	return __secure_getenv(arg);
#else
	return getenv(arg);
#endif
}

/*
 * Here are the raw I/O functions
 */
static errcode_t raw_read_blk(io_channel channel,
			      struct unix_private_data *data,
			      unsigned long long block,
			      int count, void *bufv)
{
	errcode_t	retval;
	ssize_t		size;
	ext2_loff_t	location;
	int		actual = 0;
	unsigned char	*buf = bufv;
	ssize_t		really_read = 0;
	unsigned long long aligned_blk;
	int		align_size, offset;

	size = (count < 0) ? -count : (ext2_loff_t) count * channel->block_size;
	mutex_lock(data, STATS_MTX);
	data->io_stats.bytes_read += size;
	mutex_unlock(data, STATS_MTX);
	location = ((ext2_loff_t) block * channel->block_size) + data->offset;

	if (data->flags & IO_FLAG_FORCE_BOUNCE)
		goto bounce_read;

#ifdef HAVE_PREAD64
	/* Try an aligned pread */
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(location, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = pread64(data->dev, buf, size, location);
		if (actual == size)
			return 0;
		actual = 0;
	}
#elif HAVE_PREAD
	/* Try an aligned pread */
	if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
	    ((channel->align == 0) ||
	     (IS_ALIGNED(buf, channel->align) &&
	      IS_ALIGNED(location, channel->align) &&
	      IS_ALIGNED(size, channel->align)))) {
		actual = pread(data->dev, buf, size, location);
		if (actual == size)
			return 0;
		actual = 0;
	}
#endif /* HAVE_PREAD */

	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(location, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		mutex_lock(data, BOUNCE_MTX);
		if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_unlock;
		}
		actual = read(data->dev, buf, size);
		if (actual != size) {
		short_read:
			if (actual < 0) {
				retval = errno;
				actual = 0;
			} else
				retval = EXT2_ET_SHORT_READ;
			goto error_unlock;
		}
		goto success_unlock;
	}

#ifdef ALIGN_DEBUG
	printf("raw_read_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif

	/*
	 * The buffer or size which we're trying to read isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
bounce_read:
	if (channel->align == 0)
		channel->align = 1;
	if ((channel->block_size > channel->align) &&
	    (channel->block_size % channel->align) == 0)
		align_size = channel->block_size;
	else
		align_size = channel->align;
	aligned_blk = location / align_size;
	offset = location % align_size;

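	/*
	 * Read whole aligned chunks into the bounce buffer and copy
	 * out only the portion the caller asked for; "offset" is
	 * non-zero only for the first chunk.
	 */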
	mutex_lock(data, BOUNCE_MTX);
	if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
		retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
		goto error_unlock;
	}
	while (size > 0) {
		actual = read(data->dev, data->bounce, align_size);
		if (actual != align_size) {
			mutex_unlock(data, BOUNCE_MTX);
			actual = really_read;
			buf -= really_read;
			size += really_read;
			goto short_read;
		}
		if ((actual + offset) > align_size)
			actual = align_size - offset;
		if (actual > size)
			actual = size;
		memcpy(buf, (char *)data->bounce + offset, actual);

		really_read += actual;
		size -= actual;
		buf += actual;
		offset = 0;
		aligned_blk++;
	}
success_unlock:
	mutex_unlock(data, BOUNCE_MTX);
	return 0;

error_unlock:
	mutex_unlock(data, BOUNCE_MTX);
	if (actual >= 0 && actual < size)
		memset((char *) buf+actual, 0, size-actual);
	if (channel->read_error)
		retval = (channel->read_error)(channel, block, count, buf,
					       size, actual, retval);
	return retval;
}

static errcode_t raw_write_blk(io_channel channel,
			       struct unix_private_data *data,
			       unsigned long long block,
			       int count, const void *bufv)
{
	ssize_t		size;
	ext2_loff_t	location;
	int		actual = 0;
	errcode_t	retval;
	const unsigned char *buf = bufv;
	unsigned long long aligned_blk;
	int		align_size, offset;

	if (count == 1)
		size = channel->block_size;
	else {
		if (count < 0)
			size = -count;
		else
			size = (ext2_loff_t) count * channel->block_size;
	}
	mutex_lock(data, STATS_MTX);
	data->io_stats.bytes_written += size;
	mutex_unlock(data, STATS_MTX);

	location = ((ext2_loff_t) block * channel->block_size) + data->offset;

	if (data->flags & IO_FLAG_FORCE_BOUNCE)
		goto bounce_write;

#ifdef HAVE_PWRITE64
	/* Try an aligned pwrite */
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(location, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = pwrite64(data->dev, buf, size, location);
		if (actual == size)
			return 0;
	}
#elif HAVE_PWRITE
	/* Try an aligned pwrite */
	if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
	    ((channel->align == 0) ||
	     (IS_ALIGNED(buf, channel->align) &&
	      IS_ALIGNED(location, channel->align) &&
	      IS_ALIGNED(size, channel->align)))) {
		actual = pwrite(data->dev, buf, size, location);
		if (actual == size)
			return 0;
	}
#endif /* HAVE_PWRITE */

	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(location, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		mutex_lock(data, BOUNCE_MTX);
		if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_unlock;
		}
		actual = write(data->dev, buf, size);
		mutex_unlock(data, BOUNCE_MTX);
		if (actual < 0) {
			retval = errno;
			goto error_out;
		}
		if (actual != size) {
		short_write:
			retval = EXT2_ET_SHORT_WRITE;
			goto error_out;
		}
		return 0;
	}

#ifdef ALIGN_DEBUG
	printf("raw_write_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif
	/*
	 * The buffer or size which we're trying to write isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
bounce_write:
	if (channel->align == 0)
		channel->align = 1;
	if ((channel->block_size > channel->align) &&
	    (channel->block_size % channel->align) == 0)
		align_size = channel->block_size;
	else
		align_size = channel->align;
	aligned_blk = location / align_size;
	offset = location % align_size;

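	/*
	 * For each aligned chunk: if the write covers only part of the
	 * chunk, read the existing contents into the bounce buffer
	 * first (read-modify-write), overlay the caller's data, and
	 * then write the full aligned chunk back out.
	 */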
	while (size > 0) {
		int actual_w;

		mutex_lock(data, BOUNCE_MTX);
		if (size < align_size || offset) {
			if (ext2fs_llseek(data->dev, aligned_blk * align_size,
					  SEEK_SET) < 0) {
				retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
				goto error_unlock;
			}
			actual = read(data->dev, data->bounce,
				      align_size);
			if (actual != align_size) {
				if (actual < 0) {
					retval = errno;
					goto error_unlock;
				}
				memset((char *) data->bounce + actual, 0,
				       align_size - actual);
			}
		}
		actual = size;
		if ((actual + offset) > align_size)
			actual = align_size - offset;
		if (actual > size)
			actual = size;
		memcpy(((char *)data->bounce) + offset, buf, actual);
		if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_unlock;
		}
		actual_w = write(data->dev, data->bounce, align_size);
		mutex_unlock(data, BOUNCE_MTX);
		if (actual_w < 0) {
			retval = errno;
			goto error_out;
		}
		if (actual_w != align_size)
			goto short_write;
		size -= actual;
		buf += actual;
		location += actual;
		aligned_blk++;
		offset = 0;
	}
	return 0;

error_unlock:
	mutex_unlock(data, BOUNCE_MTX);
error_out:
	if (channel->write_error)
		retval = (channel->write_error)(channel, block, count, buf,
						size, actual, retval);
	return retval;
}


/*
 * Here we implement the cache functions
 */

/* Allocate the cache buffers */
static errcode_t alloc_cache(io_channel channel,
			     struct unix_private_data *data)
{
	errcode_t		retval;
	struct unix_cache	*cache;
	int			i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
		retval = io_channel_alloc_buf(channel, 0, &cache->buf);
		if (retval)
			return retval;
	}
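	/*
	 * A bounce buffer is only needed when O_DIRECT alignment is in
	 * effect or bouncing has been forced explicitly.
	 */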
	if (channel->align || data->flags & IO_FLAG_FORCE_BOUNCE) {
		if (data->bounce)
			ext2fs_free_mem(&data->bounce);
		retval = io_channel_alloc_buf(channel, 0, &data->bounce);
	}
	return retval;
}

/* Free the cache buffers */
static void free_cache(struct unix_private_data *data)
{
	struct unix_cache	*cache;
	int			i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
	}
	if (data->bounce)
		ext2fs_free_mem(&data->bounce);
}

#ifndef NO_IO_CACHE
/*
 * Try to find a block in the cache.  If the block is not found, and
 * eldest is a non-zero pointer, then fill in eldest with the cache
 * entry that should be reused.
 */
static struct unix_cache *find_cached_block(struct unix_private_data *data,
					    unsigned long long block,
					    struct unix_cache **eldest)
{
	struct unix_cache	*cache, *unused_cache, *oldest_cache;
	int			i;

	unused_cache = oldest_cache = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use) {
			if (!unused_cache)
				unused_cache = cache;
			continue;
		}
		if (cache->block == block) {
			cache->access_time = ++data->access_time;
			return cache;
		}
		if (!oldest_cache ||
		    (cache->access_time < oldest_cache->access_time))
			oldest_cache = cache;
	}
	if (eldest)
		*eldest = (unused_cache) ? unused_cache : oldest_cache;
	return 0;
}

/*
 * Reuse a particular cache entry for another block.
 */
static void reuse_cache(io_channel channel, struct unix_private_data *data,
		 struct unix_cache *cache, unsigned long long block)
{
	if (cache->dirty && cache->in_use)
		raw_write_blk(channel, data, cache->block, 1, cache->buf);

	cache->in_use = 1;
	cache->dirty = 0;
	cache->block = block;
	cache->access_time = ++data->access_time;
}

#define FLUSH_INVALIDATE	0x01
#define FLUSH_NOLOCK		0x02
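
/*
 * FLUSH_INVALIDATE also drops every cache entry after it has been
 * written back; FLUSH_NOLOCK means the caller already holds the
 * cache mutex.
 */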

/*
 * Flush all of the blocks in the cache
 */
static errcode_t flush_cached_blocks(io_channel channel,
				     struct unix_private_data *data,
				     int flags)
{
	struct unix_cache	*cache;
	errcode_t		retval, retval2;
	int			i;

	retval2 = 0;
	if ((flags & FLUSH_NOLOCK) == 0)
		mutex_lock(data, CACHE_MTX);
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use)
			continue;

		if (flags & FLUSH_INVALIDATE)
			cache->in_use = 0;

		if (!cache->dirty)
			continue;

		retval = raw_write_blk(channel, data,
				       cache->block, 1, cache->buf);
		if (retval)
			retval2 = retval;
		else
			cache->dirty = 0;
	}
	if ((flags & FLUSH_NOLOCK) == 0)
		mutex_unlock(data, CACHE_MTX);
	return retval2;
}
#endif /* NO_IO_CACHE */

#ifdef __linux__
#ifndef BLKDISCARDZEROES
#define BLKDISCARDZEROES _IO(0x12,124)
#endif
#endif

int ext2fs_open_file(const char *pathname, int flags, mode_t mode)
{
	if (mode)
#if defined(HAVE_OPEN64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
		return open64(pathname, flags, mode);
	else
		return open64(pathname, flags);
#else
		return open(pathname, flags, mode);
	else
		return open(pathname, flags);
#endif
}

int ext2fs_stat(const char *path, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return stat64(path, buf);
#else
	return stat(path, buf);
#endif
}

int ext2fs_fstat(int fd, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return fstat64(fd, buf);
#else
	return fstat(fd, buf);
#endif
}


static errcode_t unix_open_channel(const char *name, int fd,
				   int flags, io_channel *channel,
				   io_manager io_mgr)
{
	io_channel	io = NULL;
	struct unix_private_data *data = NULL;
	errcode_t	retval;
	ext2fs_struct_stat st;
#ifdef __linux__
	struct		utsname ut;
#endif

	if (safe_getenv("UNIX_IO_FORCE_BOUNCE"))
		flags |= IO_FLAG_FORCE_BOUNCE;

#ifdef __linux__
	/*
	 * We need to make sure any previous errors in the block
	 * device are thrown away, sigh.
	 */
	(void) fsync(fd);
#endif

	retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
	if (retval)
		goto cleanup;
	memset(io, 0, sizeof(struct struct_io_channel));
	io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
	retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
	if (retval)
		goto cleanup;

	io->manager = io_mgr;
	retval = ext2fs_get_mem(strlen(name)+1, &io->name);
	if (retval)
		goto cleanup;

	strcpy(io->name, name);
	io->private_data = data;
	io->block_size = 1024;
	io->read_error = 0;
	io->write_error = 0;
	io->refcount = 1;
	io->flags = 0;

	memset(data, 0, sizeof(struct unix_private_data));
	data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
	data->io_stats.num_fields = 2;
	data->flags = flags;
	data->dev = fd;

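	/*
	 * Work out the buffer/offset alignment that direct I/O will
	 * require: query the device where O_DIRECT is available, and
	 * fall back to a fixed 4k alignment on F_NOCACHE systems.
	 */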
#if defined(O_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO)
		io->align = ext2fs_get_dio_alignment(data->dev);
#elif defined(F_NOCACHE)
	if (flags & IO_FLAG_DIRECT_IO)
		io->align = 4096;
#endif

	/*
	 * If the device is really a block device, then set the
	 * appropriate flag; otherwise we can set the DISCARD_ZEROES
	 * flag, because we are going to use punch hole instead of
	 * discard and, if that succeeds, subsequent reads from the
	 * sparse area return zeroes.
	 */
	if (ext2fs_fstat(data->dev, &st) == 0) {
		if (ext2fsP_is_disk_device(st.st_mode))
			io->flags |= CHANNEL_FLAGS_BLOCK_DEVICE;
		else
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}

#ifdef BLKDISCARDZEROES
	{
		int zeroes = 0;
		if (ioctl(data->dev, BLKDISCARDZEROES, &zeroes) == 0 &&
		    zeroes)
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}
#endif

#if defined(__CYGWIN__)
	/*
	 * Some operating systems require that the buffers be aligned,
	 * regardless of O_DIRECT
	 */
	if (!io->align)
		io->align = 512;
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
	if (io->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
		int dio_align = ext2fs_get_dio_alignment(fd);

		if (io->align < dio_align)
			io->align = dio_align;
	}
#endif

	if ((retval = alloc_cache(io, data)))
		goto cleanup;

#ifdef BLKROGET
	if (flags & IO_FLAG_RW) {
		int error;
		int readonly = 0;

		/* Is the block device actually writable? */
		error = ioctl(data->dev, BLKROGET, &readonly);
		if (!error && readonly) {
			retval = EPERM;
			goto cleanup;
		}
	}
#endif

#ifdef __linux__
#undef RLIM_INFINITY
#if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
#define RLIM_INFINITY	((unsigned long)(~0UL>>1))
#else
#define RLIM_INFINITY  (~0UL)
#endif
	/*
	 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
	 * block devices are wrongly getting hit by the filesize
	 * limit.  This workaround isn't perfect, since it won't work
	 * if glibc wasn't built against 2.2 header files.  (Sigh.)
	 *
	 */
	if ((flags & IO_FLAG_RW) &&
	    (uname(&ut) == 0) &&
	    ((ut.release[0] == '2') && (ut.release[1] == '.') &&
	     (ut.release[2] == '4') && (ut.release[3] == '.') &&
	     (ut.release[4] == '1') && (ut.release[5] >= '0') &&
	     (ut.release[5] < '8')) &&
	    (ext2fs_fstat(data->dev, &st) == 0) &&
	    (ext2fsP_is_disk_device(st.st_mode))) {
		struct rlimit	rlim;

		rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
		setrlimit(RLIMIT_FSIZE, &rlim);
		getrlimit(RLIMIT_FSIZE, &rlim);
		if (((unsigned long) rlim.rlim_cur) <
		    ((unsigned long) rlim.rlim_max)) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_FSIZE, &rlim);
		}
	}
#endif
#ifdef HAVE_PTHREAD
	if (flags & IO_FLAG_THREADS) {
		io->flags |= CHANNEL_FLAGS_THREADS;
		retval = pthread_mutex_init(&data->cache_mutex, NULL);
		if (retval)
			goto cleanup;
		retval = pthread_mutex_init(&data->bounce_mutex, NULL);
		if (retval) {
			pthread_mutex_destroy(&data->cache_mutex);
			goto cleanup;
		}
		retval = pthread_mutex_init(&data->stats_mutex, NULL);
		if (retval) {
			pthread_mutex_destroy(&data->cache_mutex);
			pthread_mutex_destroy(&data->bounce_mutex);
			goto cleanup;
		}
	}
#endif
	*channel = io;
	return 0;

cleanup:
	if (data) {
		if (data->dev >= 0)
			close(data->dev);
		free_cache(data);
		ext2fs_free_mem(&data);
	}
	if (io) {
		if (io->name) {
			ext2fs_free_mem(&io->name);
		}
		ext2fs_free_mem(&io);
	}
	return retval;
}

static errcode_t unixfd_open(const char *str_fd, int flags,
			     io_channel *channel)
{
	int fd;
	int fd_flags;

	fd = atoi(str_fd);
#if defined(HAVE_FCNTL)
	fd_flags = fcntl(fd, F_GETFD);
	if (fd_flags == -1)
		return EBADF;

	flags = 0;
	if (fd_flags & O_RDWR)
		flags |= IO_FLAG_RW;
	if (fd_flags & O_EXCL)
		flags |= IO_FLAG_EXCLUSIVE;
#if defined(O_DIRECT)
	if (fd_flags & O_DIRECT)
		flags |= IO_FLAG_DIRECT_IO;
#endif
#endif  /* HAVE_FCNTL */

	return unix_open_channel(str_fd, fd, flags, channel, unixfd_io_manager);
}

static errcode_t unix_open(const char *name, int flags,
			   io_channel *channel)
{
	int fd = -1;
	int open_flags;

	if (name == 0)
		return EXT2_ET_BAD_DEVICE_NAME;

	open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
	if (flags & IO_FLAG_EXCLUSIVE)
		open_flags |= O_EXCL;
#if defined(O_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO)
		open_flags |= O_DIRECT;
#endif
	fd = ext2fs_open_file(name, open_flags, 0);
	if (fd < 0)
		return errno;
#if defined(F_NOCACHE) && !defined(IO_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO) {
		if (fcntl(fd, F_NOCACHE, 1) < 0)
			return errno;
	}
#endif
	return unix_open_channel(name, fd, flags, channel, unix_io_manager);
}

static errcode_t unix_close(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t	retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (--channel->refcount > 0)
		return 0;

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif

	if (close(data->dev) < 0)
		retval = errno;
	free_cache(data);
#ifdef HAVE_PTHREAD
	if (data->flags & IO_FLAG_THREADS) {
		pthread_mutex_destroy(&data->cache_mutex);
		pthread_mutex_destroy(&data->bounce_mutex);
		pthread_mutex_destroy(&data->stats_mutex);
	}
#endif

	ext2fs_free_mem(&channel->private_data);
	if (channel->name)
		ext2fs_free_mem(&channel->name);
	ext2fs_free_mem(&channel);
	return retval;
}

static errcode_t unix_set_blksize(io_channel channel, int blksize)
{
	struct unix_private_data *data;
	errcode_t		retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->block_size != blksize) {
		mutex_lock(data, CACHE_MTX);
		mutex_lock(data, BOUNCE_MTX);
#ifndef NO_IO_CACHE
		if ((retval = flush_cached_blocks(channel, data, FLUSH_NOLOCK)))
			return retval;
#endif

		channel->block_size = blksize;
		free_cache(data);
		retval = alloc_cache(channel, data);
		mutex_unlock(data, BOUNCE_MTX);
		mutex_unlock(data, CACHE_MTX);
	}
	return retval;
}

static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
			       int count, void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse[READ_DIRECT_SIZE];
	errcode_t	retval = 0;
	char		*cp;
	int		i, j;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_read_blk(channel, data, block, count, buf);
#else
	if (data->flags & IO_FLAG_NOCACHE)
		return raw_read_blk(channel, data, block, count, buf);
	/*
	 * If we're doing an odd-sized read or a very large read,
	 * flush out the cache and then do a direct read.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data, 0)))
			return retval;
		return raw_read_blk(channel, data, block, count, buf);
	}

	cp = buf;
	mutex_lock(data, CACHE_MTX);
	while (count > 0) {
		/* If it's in the cache, use it! */
		if ((cache = find_cached_block(data, block, &reuse[0]))) {
#ifdef DEBUG
			printf("Using cached block %lu\n", block);
#endif
			memcpy(cp, cache->buf, channel->block_size);
			count--;
			block++;
			cp += channel->block_size;
			continue;
		}
		if (count == 1) {
			/*
			 * Special case where we read directly into the
			 * cache buffer; important in the O_DIRECT case
			 */
			cache = reuse[0];
			reuse_cache(channel, data, cache, block);
			if ((retval = raw_read_blk(channel, data, block, 1,
						   cache->buf))) {
				cache->in_use = 0;
				break;
			}
			memcpy(cp, cache->buf, channel->block_size);
			retval = 0;
			break;
		}

		/*
		 * Find the number of uncached blocks so we can do a
		 * single read request
		 */
		for (i=1; i < count; i++)
			if (find_cached_block(data, block+i, &reuse[i]))
				break;
#ifdef DEBUG
		printf("Reading %d blocks starting at %lu\n", i, block);
#endif
		if ((retval = raw_read_blk(channel, data, block, i, cp)))
			break;

		/* Save the results in the cache */
		for (j=0; j < i; j++) {
			count--;
			cache = reuse[j];
			reuse_cache(channel, data, cache, block++);
			memcpy(cache->buf, cp, channel->block_size);
			cp += channel->block_size;
		}
	}
	mutex_unlock(data, CACHE_MTX);
	return retval;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_read_blk(io_channel channel, unsigned long block,
			       int count, void *buf)
{
	return unix_read_blk64(channel, block, count, buf);
}

static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
				int count, const void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse;
	errcode_t	retval = 0;
	const char	*cp;
	int		writethrough;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_write_blk(channel, data, block, count, buf);
#else
	if (data->flags & IO_FLAG_NOCACHE)
		return raw_write_blk(channel, data, block, count, buf);
	/*
	 * If we're doing an odd-sized write or a very large write,
	 * flush out the cache completely and then do a direct write.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data,
						  FLUSH_INVALIDATE)))
			return retval;
		return raw_write_blk(channel, data, block, count, buf);
	}

	/*
	 * For a moderate-sized multi-block write, first force a write
	 * if we're in write-through cache mode, and then fill the
	 * cache with the blocks.
	 */
	writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
	if (writethrough)
		retval = raw_write_blk(channel, data, block, count, buf);

	cp = buf;
	mutex_lock(data, CACHE_MTX);
	while (count > 0) {
		cache = find_cached_block(data, block, &reuse);
		if (!cache) {
			cache = reuse;
			reuse_cache(channel, data, cache, block);
		}
		if (cache->buf != cp)
			memcpy(cache->buf, cp, channel->block_size);
		cache->dirty = !writethrough;
		count--;
		block++;
		cp += channel->block_size;
	}
	mutex_unlock(data, CACHE_MTX);
	return retval;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_cache_readahead(io_channel channel,
				      unsigned long long block,
				      unsigned long long count)
{
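	/*
	 * Ask the kernel to start reading the given block range in the
	 * background; this is purely advisory and only available where
	 * posix_fadvise() supports POSIX_FADV_WILLNEED.
	 */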
#ifdef POSIX_FADV_WILLNEED
	struct unix_private_data *data;

	data = (struct unix_private_data *)channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
	return posix_fadvise(data->dev,
			     (ext2_loff_t)block * channel->block_size + data->offset,
			     (ext2_loff_t)count * channel->block_size,
			     POSIX_FADV_WILLNEED);
#else
	return EXT2_ET_OP_NOT_SUPPORTED;
#endif
}

static errcode_t unix_write_blk(io_channel channel, unsigned long block,
				int count, const void *buf)
{
	return unix_write_blk64(channel, block, count, buf);
}

static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
				 int size, const void *buf)
{
	struct unix_private_data *data;
	errcode_t	retval = 0;
	ssize_t		actual;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->align != 0) {
#ifdef ALIGN_DEBUG
		printf("unix_write_byte: O_DIRECT fallback\n");
#endif
		return EXT2_ET_UNIMPLEMENTED;
	}

#ifndef NO_IO_CACHE
	/*
	 * Flush out the cache completely
	 */
	if ((retval = flush_cached_blocks(channel, data, FLUSH_INVALIDATE)))
		return retval;
#endif

	if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
		return errno;

	actual = write(data->dev, buf, size);
	if (actual < 0)
		return errno;
	if (actual != size)
		return EXT2_ET_SHORT_WRITE;

	return 0;
}

/*
 * Flush data buffers to disk.
 */
static errcode_t unix_flush(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif
#ifdef HAVE_FSYNC
	if (!retval && fsync(data->dev) != 0)
		return errno;
#endif
	return retval;
}

static errcode_t unix_set_option(io_channel channel, const char *option,
				 const char *arg)
{
	struct unix_private_data *data;
	unsigned long long tmp;
	errcode_t retval;
	char *end;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (!strcmp(option, "offset")) {
		if (!arg)
			return EXT2_ET_INVALID_ARGUMENT;

		tmp = strtoull(arg, &end, 0);
		if (*end)
			return EXT2_ET_INVALID_ARGUMENT;
		data->offset = tmp;
		if (data->offset < 0)
			return EXT2_ET_INVALID_ARGUMENT;
		return 0;
	}
	if (!strcmp(option, "cache")) {
		if (!arg)
			return EXT2_ET_INVALID_ARGUMENT;
		if (!strcmp(arg, "on")) {
			data->flags &= ~IO_FLAG_NOCACHE;
			return 0;
		}
		if (!strcmp(arg, "off")) {
			retval = flush_cached_blocks(channel, data, 0);
			data->flags |= IO_FLAG_NOCACHE;
			return retval;
		}
		return EXT2_ET_INVALID_ARGUMENT;
	}
	return EXT2_ET_INVALID_ARGUMENT;
}

#if defined(__linux__) && !defined(BLKDISCARD)
#define BLKDISCARD		_IO(0x12,119)
#endif

static errcode_t unix_discard(io_channel channel, unsigned long long block,
			      unsigned long long count)
{
	struct unix_private_data *data;
	int		ret;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
#ifdef BLKDISCARD
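		/*
		 * For a real block device, translate the block range
		 * into a byte range and hand it to BLKDISCARD.
		 */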
		__u64 range[2];

		range[0] = (__u64)(block) * channel->block_size + data->offset;
		range[1] = (__u64)(count) * channel->block_size;

		ret = ioctl(data->dev, BLKDISCARD, &range);
#else
		goto unimplemented;
#endif
	} else {
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE)
		/*
		 * If we are not on a block device, try to use punch
		 * hole to reclaim free space.
		 */
		ret = fallocate(data->dev,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				(off_t)(block) * channel->block_size + data->offset,
				(off_t)(count) * channel->block_size);
#else
		goto unimplemented;
#endif
	}
	if (ret < 0) {
		if (errno == EOPNOTSUPP)
			goto unimplemented;
		return errno;
	}
	return 0;
unimplemented:
	return EXT2_ET_UNIMPLEMENTED;
}

/*
 * If we know about ZERO_RANGE, try that before we try PUNCH_HOLE because
 * ZERO_RANGE doesn't unmap preallocated blocks.  We prefer fallocate because
 * it always invalidates page cache, and libext2fs requires that reads after
 * ZERO_RANGE return zeroes.
 */
static int __unix_zeroout(int fd, off_t offset, off_t len)
{
	int ret = -1;

#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_ZERO_RANGE)
	ret = fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
	if (ret == 0)
		return 0;
#endif
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset,  len);
	if (ret == 0)
		return 0;
#endif
	errno = EOPNOTSUPP;
	return ret;
}

/* parameters might not be used if OS doesn't support zeroout */
#if __GNUC_PREREQ (4, 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
static errcode_t unix_zeroout(io_channel channel, unsigned long long block,
			      unsigned long long count)
{
	struct unix_private_data *data;
	int		ret;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (safe_getenv("UNIX_IO_NOZEROOUT"))
		goto unimplemented;

	if (!(channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE)) {
		/* Regular file, try to use truncate/punch/zero. */
		struct stat statbuf;

		if (count == 0)
			return 0;
		/*
		 * If we're trying to zero a range past the end of the file,
		 * extend the file size, then truncate everything.
		 */
		ret = fstat(data->dev, &statbuf);
		if (ret)
			goto err;
		if ((unsigned long long) statbuf.st_size <
			(block + count) * channel->block_size + data->offset) {
			ret = ftruncate(data->dev,
					(block + count) * channel->block_size + data->offset);
			if (ret)
				goto err;
		}
	}

	ret = __unix_zeroout(data->dev,
			(off_t)(block) * channel->block_size + data->offset,
			(off_t)(count) * channel->block_size);
err:
	if (ret < 0) {
		if (errno == EOPNOTSUPP)
			goto unimplemented;
		return errno;
	}
	return 0;
unimplemented:
	return EXT2_ET_UNIMPLEMENTED;
}
#if __GNUC_PREREQ (4, 6)
#pragma GCC diagnostic pop
#endif

static struct struct_io_manager struct_unix_manager = {
	.magic		= EXT2_ET_MAGIC_IO_MANAGER,
	.name		= "Unix I/O Manager",
	.open		= unix_open,
	.close		= unix_close,
	.set_blksize	= unix_set_blksize,
	.read_blk	= unix_read_blk,
	.write_blk	= unix_write_blk,
	.flush		= unix_flush,
	.write_byte	= unix_write_byte,
	.set_option	= unix_set_option,
	.get_stats	= unix_get_stats,
	.read_blk64	= unix_read_blk64,
	.write_blk64	= unix_write_blk64,
	.discard	= unix_discard,
	.cache_readahead	= unix_cache_readahead,
	.zeroout	= unix_zeroout,
};

io_manager unix_io_manager = &struct_unix_manager;

static struct struct_io_manager struct_unixfd_manager = {
	.magic		= EXT2_ET_MAGIC_IO_MANAGER,
	.name		= "Unix fd I/O Manager",
	.open		= unixfd_open,
	.close		= unix_close,
	.set_blksize	= unix_set_blksize,
	.read_blk	= unix_read_blk,
	.write_blk	= unix_write_blk,
	.flush		= unix_flush,
	.write_byte	= unix_write_byte,
	.set_option	= unix_set_option,
	.get_stats	= unix_get_stats,
	.read_blk64	= unix_read_blk64,
	.write_blk64	= unix_write_blk64,
	.discard	= unix_discard,
	.cache_readahead	= unix_cache_readahead,
	.zeroout	= unix_zeroout,
};

io_manager unixfd_io_manager = &struct_unixfd_manager;