/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

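/*
 * HPFS keeps a hotfix map: sectors that developed media errors are
 * remapped to spare sectors, and the (from, to) pairs are cached in
 * the in-memory superblock info.  hpfs_search_hotfix_map() translates
 * one logical sector through that map.  The loop condition is wrapped
 * in unlikely() because almost all volumes have no hotfixes at all,
 * so the loop body is normally never entered.
 */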
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

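/*
 * Return how many sectors, starting at sec and at most n, can be
 * accessed as one contiguous run: the length of the prefix of
 * [sec, sec + n) that contains no hotfixed sector.  Because n shrinks
 * inside the loop and the range check uses the updated value, the
 * result converges on the distance to the nearest hotfix in range.
 */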
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

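/*
 * Issue readahead for n sectors starting at secno.  The range is
 * skipped entirely if it crosses a hotfix (the remapped sectors are
 * not contiguous on disk) or if the first block is already uptodate
 * in the cache, in which case the rest of the run is assumed to be
 * cached too.  blk_start_plug()/blk_finish_plug() batch the requests
 * so the block layer can merge them into fewer, larger I/Os.
 */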
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to it and to the buffer. */

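/*
 * The ahead argument is a readahead hint: up to that many sectors
 * starting at secno are prefetched before the synchronous read.  The
 * hotfix translation is applied here, at the sb_bread() call, so
 * callers always work with logical sector numbers.
 */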
void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		 int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	hpfs_prefetch_sectors(s, secno, ahead);

	cond_resched();

	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	else {
		pr_err("%s(): read error\n", __func__);
		return NULL;
	}
}

/* Like hpfs_map_sector but don't read anything */

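/*
 * sb_getblk() returns the buffer without reading it from disk.  If
 * I/O is still in flight on the buffer we wait for it to finish, then
 * mark the buffer uptodate unconditionally: the caller is going to
 * overwrite the whole sector, so its current contents don't matter.
 */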
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/* Map 4 sectors into a quad buffer and return pointers to it and to the buffers. */

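/*
 * Several HPFS structures (dnodes, for example) are 2 KiB and span
 * four 512-byte sectors.  If the four buffer heads happen to map
 * consecutive memory, a pointer into the first buffer covers all
 * 2 KiB and is returned directly.  Otherwise the data is copied into
 * a kmalloc'ed bounce buffer; hpfs_mark_4buffers_dirty() copies it
 * back and hpfs_brelse4() frees it.
 */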
void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
		   int ahead)
{
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

 bail4:
	brelse(qbh->bh[3]);
 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	return NULL;
}

/* Like hpfs_map_4sectors but don't read anything */

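/*
 * When a bounce buffer has to be allocated here, its contents are
 * uninitialized: the caller is expected to fill all 2048 bytes before
 * calling hpfs_mark_4buffers_dirty(), which copies them back over the
 * four buffers.
 */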
void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
			struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

bail4:
	brelse(qbh->bh[3]);
bail3:
	brelse(qbh->bh[2]);
bail2:
	brelse(qbh->bh[1]);
bail1:
	brelse(qbh->bh[0]);
bail0:
	return NULL;
}

void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}

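/*
 * If a bounce buffer was used, propagate its contents back into the
 * four real buffers before marking them dirty for writeback.
 */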
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}
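
/*
 * Typical quad-buffer life cycle (a sketch, not code from this file;
 * the dnode number "dno" and the error handling are illustrative
 * only).  A 2 KiB on-disk structure is mapped, modified through the
 * returned pointer, marked dirty and released:
 *
 *	struct quad_buffer_head qbh;
 *	struct dnode *d;
 *
 *	d = hpfs_map_4sectors(s, dno, &qbh, 0);
 *	if (!d)
 *		return -EIO;
 *	... modify *d through the returned pointer ...
 *	hpfs_mark_4buffers_dirty(&qbh);
 *	hpfs_brelse4(&qbh);
 */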