/*
 *  linux/fs/ufs/util.h
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"


/*
 * some useful macros
 */
#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))

/*
 * functions used for retyping
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;
}
static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;
}



/*
 * inline helpers used for accessing on-disk structures
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
			return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
		/* Fall Through to UFS_ST_SUN */
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
			usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
			break;
		}
		/* Fall Through to UFS_ST_SUN */
	case UFS_ST_SUN:
		usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}

static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}

static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
		/* Fall through */
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		/* Fall through */
	default:
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	}
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
		/* Fall through */
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		/* Fall through */
	default:
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	}
}

extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
extern void ubh_sync_block(struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);

/* These functions work with page cache pages */
extern struct page *ufs_get_locked_page(struct address_space *mapping,
					pgoff_t index);
static inline void ufs_put_locked_page(struct page *page)
{
	unlock_page(page);
	page_cache_release(page);
}


/*
 * macros and an inline function to get important structures from ufs_sb_private_info
 */

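/*
 * The on-disk superblock may span several fragment-sized buffers;
 * get_usb_offset() turns a byte offset within the superblock into the
 * in-memory address inside the matching buffer of uspi->s_ubh.
 */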
static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;
	offset &= ~uspi->s_fmask;
	return uspi->s_ubh.bh[index]->b_data + offset;
}

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi)	\
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))


#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))


/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr64(ubh,begin) \
	(((__fs64*)((ubh)->bh[(begin) >> (uspi->s_fshift-3)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>3) - 1)))

#define ubh_get_addr ubh_get_addr8

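/*
 * ubh_get_data_ptr() returns the address of the blk-th block pointer held
 * in the buffer: a 64-bit slot on UFS2, a 32-bit slot on UFS1.
 */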
static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
				     struct ufs_buffer_head *ubh,
				     u64 blk)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return ubh_get_addr64(ubh, blk);
	else
		return ubh_get_addr32(ubh, blk);
}

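/*
 * ubh_blkmap() extracts the s_fpb fragment bits describing one block from a
 * bitmap, shifted down so that bit 0 is the first fragment of that block.
 */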
#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

/*
 * Determine the number of available frags given a
 * percentage to hold in reserve.
 */
static inline u64
ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
{
	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
		uspi->cs_total.cs_nffree -
		(uspi->s_dsize * (percentreserved) / 100);
}

/*
 * Macros to access cylinder group array structures
 */
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These functions work like classical bitmap operations.
 * The difference is that we don't have the whole bitmap
 * in one contiguous chunk of memory, but in several buffers.
 * The parameters of each function are super_block, ufs_buffer_head and
 * position of the beginning of the bitmap.
 */
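/*
 * Illustrative example: a caller holding a cylinder group's private info can
 * test whether fragment 'frag' is still marked free in the free-fragment map:
 *
 *	if (ubh_isset(UCPI_UBH(ucpi), ucpi->c_freeoff, frag))
 *		... fragment is free ...
 */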
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

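/*
 * Scan a byte bitmap backwards, from bit 'size' down to bit 'offset' + 1,
 * and return the position of the first zero bit found; if all of those
 * bits are set, 'offset' is returned.
 */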
static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			    size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
			start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

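/*
 * Test, set and clear a whole block in the fragment bitmap.  A block is
 * represented by s_fpb consecutive bits, so the mask used depends on the
 * number of fragments per block (8, 4, 2 or 1).
 */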
#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))

#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	u8 mask;
	switch (uspi->s_fpb) {
	case 8:
		return (*ubh_get_addr (ubh, begin + block) == 0xff);
	case 4:
		mask = 0x0f << ((block & 0x01) << 2);
		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
	case 2:
		mask = 0x03 << ((block & 0x03) << 1);
		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
	case 1:
		mask = 0x01 << (block & 0x07);
		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
	}
	return 0;
}

#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr (ubh, begin + block) = 0x00;
		return;
	case 4:
		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
		return;
	}
}

#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr(ubh, begin + block) = 0xff;
		return;
	case 4:
		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
		return;
	}
}

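/*
 * ufs_fragacct() walks the fragment map of one block ('blockmap', one bit per
 * fragment; as used by the block allocator a set bit means a free fragment)
 * and, for every maximal run of free fragments that does not cover the whole
 * block, adds 'cnt' to fraglist[run length].
 */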
static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;

	uspi = UFS_SB(sb)->s_uspi;

	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}

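/*
 * UFS1 stores block pointers on disk as 32-bit values, UFS2 as 64-bit values.
 * The helpers below hand out and convert these "data pointers" so that
 * callers do not have to care which flavour is mounted.
 */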
static inline void *ufs_get_direct_data_ptr(struct ufs_sb_private_info *uspi,
					    struct ufs_inode_info *ufsi,
					    unsigned blk)
{
	BUG_ON(blk > UFS_TIND_BLOCK);
	return uspi->fs_magic == UFS2_MAGIC ?
		(void *)&ufsi->i_u1.u2_i_data[blk] :
		(void *)&ufsi->i_u1.i_data[blk];
}

static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
{
	return UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC ?
		fs64_to_cpu(sb, *(__fs64 *)p) :
		fs32_to_cpu(sb, *(__fs32 *)p);
}

static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
{
	if (UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = cpu_to_fs64(sb, val);
	else
		*(__fs32 *)p = cpu_to_fs32(sb, val);
}

static inline void ufs_data_ptr_clear(struct ufs_sb_private_info *uspi,
				      void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = 0;
	else
		*(__fs32 *)p = 0;
}

static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
				       void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return *(__fs64 *)p == 0;
	else
		return *(__fs32 *)p == 0;
}