/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_BUFIO_H
#define _LINUX_DM_BUFIO_H

#include <linux/blkdev.h>
#include <linux/types.h>

/*----------------------------------------------------------------*/

struct dm_bufio_client;
struct dm_buffer;

/*
 * Create a buffered IO cache on a given device.
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
		       unsigned reserved_buffers, unsigned aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *));

/*
 * Release a buffered IO cache.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);

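/*
 * Illustrative sketch (not part of this header's API): how a caller might
 * use dm_bufio_client_create() and dm_bufio_client_destroy() above. The
 * block size of 4096, the single reserved buffer and the "bdev" variable
 * are made-up example values, and the error handling assumes the usual
 * ERR_PTR convention.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */
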
/*
 * Set the sector offset at which the client's blocks start on the device.
 * When this function is called, there must be no I/O in progress on the bufio
 * client.
 */
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);

/*
 * WARNING: to avoid deadlocks, these conditions must be observed:
 *
 * - At most one thread can hold up to "reserved_buffers" buffers simultaneously.
 * - Every other thread can hold at most one buffer.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
 */

/*
 * Read a given block from disk. Returns a pointer to the data. Also returns,
 * via *bp, a pointer to the dm_buffer that can be used to release the buffer
 * or to mark it dirty.
 */
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp);

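/*
 * Illustrative sketch: the typical read/use/release pattern with
 * dm_bufio_read() above and dm_bufio_release() declared further below.
 * "c" and "block" are assumed to exist; the IS_ERR check reflects the
 * usual ERR_PTR convention for read errors.
 *
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... use the block_size bytes at "data" ...
 *	dm_bufio_release(bp);
 */
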
/*
 * Like dm_bufio_read, but only return the buffer if it is already in the
 * cache, without reading from disk. If the buffer is not in the cache,
 * return NULL.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

/*
 * Like dm_bufio_read, but don't read anything from the disk.  It is
 * expected that the caller initializes the buffer and marks it dirty.
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

/*
 * Prefetch the specified blocks to the cache.
 * The function starts to read the blocks and returns without waiting for
 * I/O to finish.
 */
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks);

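/*
 * Illustrative sketch: prefetching a range of blocks so that the
 * subsequent dm_bufio_read() calls are likely to be served from the
 * cache. "c", "first" and "count" are made-up example values.
 *
 *	dm_bufio_prefetch(c, first, count);
 *	for (i = 0; i < count; i++) {
 *		data = dm_bufio_read(c, first + i, &bp);
 *		if (IS_ERR(data))
 *			break;
 *		... process the block ...
 *		dm_bufio_release(bp);
 *	}
 */
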
/*
 * Release a reference obtained with dm_bufio_{read,get,new}. The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);

/*
 * Mark a buffer dirty. It should be called after the buffer is modified.
 *
 * Under memory pressure, the buffer may be written out after
 * dm_bufio_mark_buffer_dirty but before dm_bufio_write_dirty_buffers is
 * called. dm_bufio_write_dirty_buffers only guarantees that the buffer is
 * on disk by the time it returns; the actual writing may occur earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);

/*
 * Mark a part of the buffer dirty.
 *
 * The specified part of the buffer is scheduled to be written. dm-bufio may
 * write the specified part of the buffer or it may write a larger superset.
 */
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end);

/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);

/*
 * Write all dirty buffers. Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);

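/*
 * Illustrative sketch: allocating a block with dm_bufio_new(), marking it
 * dirty and committing it with dm_bufio_write_dirty_buffers(). The zero
 * fill is only an example of initializing the buffer; "c" and "block" are
 * assumed to exist.
 *
 *	data = dm_bufio_new(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	r = dm_bufio_write_dirty_buffers(c);
 */
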
/*
 * Send an empty flush request to the device to flush the hardware disk cache.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);

/*
 * Send a discard request to the underlying device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);

/*
 * Like dm_bufio_release but also move the buffer to the new
 * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);

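/*
 * Illustrative sketch: relocating a cached block with
 * dm_bufio_release_move() and committing the new location, as described
 * above. "old_block" and "new_block" are assumed example values.
 *
 *	data = dm_bufio_read(c, old_block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	dm_bufio_release_move(bp, new_block);
 *	r = dm_bufio_write_dirty_buffers(c);
 */
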
/*
 * Free the given buffer.
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);

/*
 * Free the given range of buffers.
 * This is just a hint; if a buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);

/*
 * Set the minimum number of buffers to keep in the cache; cleanup will not
 * reclaim buffers below this count.
 */
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);

/*----------------------------------------------------------------*/

#endif