1 /**
2 * libf2fs.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * Dual licensed under the GPL or LGPL version 2 licenses.
8 */
9 #ifndef _LARGEFILE64_SOURCE
10 #define _LARGEFILE64_SOURCE
11 #endif
12 #define _FILE_OFFSET_BITS 64
13
14 #include <f2fs_fs.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <errno.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <libgen.h>
22 #ifdef HAVE_MNTENT_H
23 #include <mntent.h>
24 #endif
25 #include <time.h>
26 #include <sys/stat.h>
27 #ifndef ANDROID_WINDOWS_HOST
28 #include <sys/mount.h>
29 #include <sys/ioctl.h>
30 #endif
31 #ifdef HAVE_SYS_SYSMACROS_H
32 #include <sys/sysmacros.h>
33 #endif
34 #ifdef HAVE_SYS_UTSNAME_H
35 #include <sys/utsname.h>
36 #endif
37 #ifndef WITH_ANDROID
38 #ifdef HAVE_SCSI_SG_H
39 #include <scsi/sg.h>
40 #endif
41 #endif
42 #ifdef HAVE_LINUX_HDREG_H
43 #include <linux/hdreg.h>
44 #endif
45 #ifdef HAVE_LINUX_LIMITS_H
46 #include <linux/limits.h>
47 #endif
48
49 #ifndef WITH_ANDROID
50 /* SCSI command for standard inquiry */
51 #define MODELINQUIRY 0x12,0x00,0x00,0x00,0x4A,0x00
52 #endif
53
54 #ifndef ANDROID_WINDOWS_HOST /* O_BINARY is windows-specific flag */
55 #define O_BINARY 0
56 #else
57 /* On Windows, wchar_t is only 16 bits wide and it causes compilation errors. */
58 #define wchar_t int
59 #endif
60
61 /*
62  * UTF conversion code is copied from the exfat tools.
63 */
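/*
 * utf8_to_wchar() decodes one UTF-8 sequence by inspecting the lead byte:
 * 0xxxxxxx = 1 byte, 110xxxxx = 2, 1110xxxx = 3, 11110xxx = 4,
 * 111110xx = 5 and 1111110x = 6 bytes.  The 5- and 6-byte forms predate
 * RFC 3629 (which caps UTF-8 at 4 bytes) and are kept only because the
 * original exfat code accepted them.
 * Example: 0xE2 0x82 0xAC decodes to U+20AC (EURO SIGN).
 */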
64 static const char *utf8_to_wchar(const char *input, wchar_t *wc,
65 size_t insize)
66 {
67 if ((input[0] & 0x80) == 0 && insize >= 1) {
68 *wc = (wchar_t) input[0];
69 return input + 1;
70 }
71 if ((input[0] & 0xe0) == 0xc0 && insize >= 2) {
72 *wc = (((wchar_t) input[0] & 0x1f) << 6) |
73 ((wchar_t) input[1] & 0x3f);
74 return input + 2;
75 }
76 if ((input[0] & 0xf0) == 0xe0 && insize >= 3) {
77 *wc = (((wchar_t) input[0] & 0x0f) << 12) |
78 (((wchar_t) input[1] & 0x3f) << 6) |
79 ((wchar_t) input[2] & 0x3f);
80 return input + 3;
81 }
82 if ((input[0] & 0xf8) == 0xf0 && insize >= 4) {
83 *wc = (((wchar_t) input[0] & 0x07) << 18) |
84 (((wchar_t) input[1] & 0x3f) << 12) |
85 (((wchar_t) input[2] & 0x3f) << 6) |
86 ((wchar_t) input[3] & 0x3f);
87 return input + 4;
88 }
89 if ((input[0] & 0xfc) == 0xf8 && insize >= 5) {
90 *wc = (((wchar_t) input[0] & 0x03) << 24) |
91 (((wchar_t) input[1] & 0x3f) << 18) |
92 (((wchar_t) input[2] & 0x3f) << 12) |
93 (((wchar_t) input[3] & 0x3f) << 6) |
94 ((wchar_t) input[4] & 0x3f);
95 return input + 5;
96 }
97 if ((input[0] & 0xfe) == 0xfc && insize >= 6) {
98 *wc = (((wchar_t) input[0] & 0x01) << 30) |
99 (((wchar_t) input[1] & 0x3f) << 24) |
100 (((wchar_t) input[2] & 0x3f) << 18) |
101 (((wchar_t) input[3] & 0x3f) << 12) |
102 (((wchar_t) input[4] & 0x3f) << 6) |
103 ((wchar_t) input[5] & 0x3f);
104 return input + 6;
105 }
106 return NULL;
107 }
108
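/*
 * Code points above U+FFFF are stored as a UTF-16 surrogate pair:
 * subtract 0x10000, put the high 10 bits in 0xD800..0xDBFF and the low
 * 10 bits in 0xDC00..0xDFFF.  For example U+1F600 becomes the pair
 * 0xD83D 0xDE00 (stored little-endian via cpu_to_le16()).
 */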
109 static u_int16_t *wchar_to_utf16(u_int16_t *output, wchar_t wc, size_t outsize)
110 {
111 if (wc <= 0xffff) {
112 if (outsize == 0)
113 return NULL;
114 output[0] = cpu_to_le16(wc);
115 return output + 1;
116 }
117 if (outsize < 2)
118 return NULL;
119 wc -= 0x10000;
120 output[0] = cpu_to_le16(0xd800 | ((wc >> 10) & 0x3ff));
121 output[1] = cpu_to_le16(0xdc00 | (wc & 0x3ff));
122 return output + 2;
123 }
124
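/*
 * Illustrative use only (not called here): converting a user-supplied
 * volume label to its on-disk UTF-16 form.  outsize and insize are
 * counted in 16-bit units and bytes respectively, and the terminating
 * NUL is written unconditionally, so the output buffer must leave room
 * for it.  MAX_VOLUME_NAME is assumed to be the label capacity in
 * 16-bit units:
 *
 *	u_int16_t label[MAX_VOLUME_NAME];
 *	const char *name = "backup";
 *
 *	if (utf8_to_utf16(label, name, MAX_VOLUME_NAME - 1, strlen(name)) < 0)
 *		return -1;	(-EILSEQ or -ENAMETOOLONG)
 */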
125 int utf8_to_utf16(u_int16_t *output, const char *input, size_t outsize,
126 size_t insize)
127 {
128 const char *inp = input;
129 u_int16_t *outp = output;
130 wchar_t wc;
131
132 while ((size_t)(inp - input) < insize && *inp) {
133 inp = utf8_to_wchar(inp, &wc, insize - (inp - input));
134 if (inp == NULL) {
135 DBG(0, "illegal UTF-8 sequence\n");
136 return -EILSEQ;
137 }
138 outp = wchar_to_utf16(outp, wc, outsize - (outp - output));
139 if (outp == NULL) {
140 DBG(0, "name is too long\n");
141 return -ENAMETOOLONG;
142 }
143 }
144 *outp = cpu_to_le16(0);
145 return 0;
146 }
147
148 static const u_int16_t *utf16_to_wchar(const u_int16_t *input, wchar_t *wc,
149 size_t insize)
150 {
151 if ((le16_to_cpu(input[0]) & 0xfc00) == 0xd800) {
152 if (insize < 2 || (le16_to_cpu(input[1]) & 0xfc00) != 0xdc00)
153 return NULL;
154 *wc = ((wchar_t) (le16_to_cpu(input[0]) & 0x3ff) << 10);
155 *wc |= (le16_to_cpu(input[1]) & 0x3ff);
156 *wc += 0x10000;
157 return input + 2;
158 } else {
159 *wc = le16_to_cpu(*input);
160 return input + 1;
161 }
162 }
163
164 static char *wchar_to_utf8(char *output, wchar_t wc, size_t outsize)
165 {
166 if (wc <= 0x7f) {
167 if (outsize < 1)
168 return NULL;
169 *output++ = (char) wc;
170 } else if (wc <= 0x7ff) {
171 if (outsize < 2)
172 return NULL;
173 *output++ = 0xc0 | (wc >> 6);
174 *output++ = 0x80 | (wc & 0x3f);
175 } else if (wc <= 0xffff) {
176 if (outsize < 3)
177 return NULL;
178 *output++ = 0xe0 | (wc >> 12);
179 *output++ = 0x80 | ((wc >> 6) & 0x3f);
180 *output++ = 0x80 | (wc & 0x3f);
181 } else if (wc <= 0x1fffff) {
182 if (outsize < 4)
183 return NULL;
184 *output++ = 0xf0 | (wc >> 18);
185 *output++ = 0x80 | ((wc >> 12) & 0x3f);
186 *output++ = 0x80 | ((wc >> 6) & 0x3f);
187 *output++ = 0x80 | (wc & 0x3f);
188 } else if (wc <= 0x3ffffff) {
189 if (outsize < 5)
190 return NULL;
191 *output++ = 0xf8 | (wc >> 24);
192 *output++ = 0x80 | ((wc >> 18) & 0x3f);
193 *output++ = 0x80 | ((wc >> 12) & 0x3f);
194 *output++ = 0x80 | ((wc >> 6) & 0x3f);
195 *output++ = 0x80 | (wc & 0x3f);
196 } else if (wc <= 0x7fffffff) {
197 if (outsize < 6)
198 return NULL;
199 *output++ = 0xfc | (wc >> 30);
200 *output++ = 0x80 | ((wc >> 24) & 0x3f);
201 *output++ = 0x80 | ((wc >> 18) & 0x3f);
202 *output++ = 0x80 | ((wc >> 12) & 0x3f);
203 *output++ = 0x80 | ((wc >> 6) & 0x3f);
204 *output++ = 0x80 | (wc & 0x3f);
205 } else
206 return NULL;
207
208 return output;
209 }
210
211 int utf16_to_utf8(char *output, const u_int16_t *input, size_t outsize,
212 size_t insize)
213 {
214 const u_int16_t *inp = input;
215 char *outp = output;
216 wchar_t wc;
217
218 while ((size_t)(inp - input) < insize && le16_to_cpu(*inp)) {
219 inp = utf16_to_wchar(inp, &wc, insize - (inp - input));
220 if (inp == NULL) {
221 DBG(0, "illegal UTF-16 sequence\n");
222 return -EILSEQ;
223 }
224 outp = wchar_to_utf8(outp, wc, outsize - (outp - output));
225 if (outp == NULL) {
226 DBG(0, "name is too long\n");
227 return -ENAMETOOLONG;
228 }
229 }
230 *outp = '\0';
231 return 0;
232 }
233
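/*
 * Integer log2 for exact powers of two only: log_base_2(4096) == 12,
 * while 0 or a non-power-of-two value such as 12 returns -1.
 */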
234 int log_base_2(u_int32_t num)
235 {
236 int ret = 0;
237 if (num <= 0 || (num & (num - 1)) != 0)
238 return -1;
239
240 while (num >>= 1)
241 ret++;
242 return ret;
243 }
244
245 /*
246 * f2fs bit operations
247 */
248 static const int bits_in_byte[256] = {
249 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
250 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
251 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
252 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
253 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
254 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
255 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
256 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
257 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
258 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
259 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
260 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
261 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
262 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
263 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
264 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
265 };
266
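/* Population count via the 256-entry table above, e.g. get_bits_in_byte(0xf0) == 4. */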
267 int get_bits_in_byte(unsigned char n)
268 {
269 return bits_in_byte[n];
270 }
271
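/*
 * Two bit-numbering conventions are used below.  The *_bit_le() helpers
 * use little-endian numbering (bit 0 is the least significant bit of
 * byte 0), while f2fs_test_bit()/f2fs_set_bit()/f2fs_clear_bit() number
 * bits from the most significant bit of each byte (mask 1 << (7 - n)).
 * The two families are not interchangeable on the same bitmap.
 */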
272 int test_and_set_bit_le(u32 nr, u8 *addr)
273 {
274 int mask, retval;
275
276 addr += nr >> 3;
277 mask = 1 << ((nr & 0x07));
278 retval = mask & *addr;
279 *addr |= mask;
280 return retval;
281 }
282
283 int test_and_clear_bit_le(u32 nr, u8 *addr)
284 {
285 int mask, retval;
286
287 addr += nr >> 3;
288 mask = 1 << ((nr & 0x07));
289 retval = mask & *addr;
290 *addr &= ~mask;
291 return retval;
292 }
293
294 int test_bit_le(u32 nr, const u8 *addr)
295 {
296 return ((1 << (nr & 7)) & (addr[nr >> 3]));
297 }
298
299 int f2fs_test_bit(unsigned int nr, const char *p)
300 {
301 int mask;
302 char *addr = (char *)p;
303
304 addr += (nr >> 3);
305 mask = 1 << (7 - (nr & 0x07));
306 return (mask & *addr) != 0;
307 }
308
309 int f2fs_set_bit(unsigned int nr, char *addr)
310 {
311 int mask;
312 int ret;
313
314 addr += (nr >> 3);
315 mask = 1 << (7 - (nr & 0x07));
316 ret = mask & *addr;
317 *addr |= mask;
318 return ret;
319 }
320
321 int f2fs_clear_bit(unsigned int nr, char *addr)
322 {
323 int mask;
324 int ret;
325
326 addr += (nr >> 3);
327 mask = 1 << (7 - (nr & 0x07));
328 ret = mask & *addr;
329 *addr &= ~mask;
330 return ret;
331 }
332
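/*
 * Index of the least significant set bit in a byte, e.g. __ffs(0x18) == 3.
 * Callers only pass a non-zero byte (a zero byte would yield 7).
 */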
333 static inline u64 __ffs(u8 word)
334 {
335 int num = 0;
336
337 if ((word & 0xf) == 0) {
338 num += 4;
339 word >>= 4;
340 }
341 if ((word & 0x3) == 0) {
342 num += 2;
343 word >>= 2;
344 }
345 if ((word & 0x1) == 0)
346 num += 1;
347 return num;
348 }
349
350 /* Copied from linux/lib/find_bit.c */
351 #define BITMAP_FIRST_BYTE_MASK(start) (0xff << ((start) & (BITS_PER_BYTE - 1)))
352
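/*
 * Common scanner for the two wrappers below: each byte is XORed with
 * 'invert' before testing, so passing 0 finds the next set bit and
 * passing 0xff finds the next zero bit with the same loop.  The return
 * value is 'nbits' when no matching bit exists.  For example, with the
 * bitmap {0xff, 0x0f} and size 16, find_next_zero_bit_le() returns 12.
 */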
353 static u64 _find_next_bit_le(const u8 *addr, u64 nbits, u64 start, char invert)
354 {
355 u8 tmp;
356
357 if (!nbits || start >= nbits)
358 return nbits;
359
360 tmp = addr[start / BITS_PER_BYTE] ^ invert;
361
362 /* Handle 1st word. */
363 tmp &= BITMAP_FIRST_BYTE_MASK(start);
364 start = round_down(start, BITS_PER_BYTE);
365
366 while (!tmp) {
367 start += BITS_PER_BYTE;
368 if (start >= nbits)
369 return nbits;
370
371 tmp = addr[start / BITS_PER_BYTE] ^ invert;
372 }
373
374 return min(start + __ffs(tmp), nbits);
375 }
376
377 u64 find_next_bit_le(const u8 *addr, u64 size, u64 offset)
378 {
379 return _find_next_bit_le(addr, size, offset, 0);
380 }
381
382
383 u64 find_next_zero_bit_le(const u8 *addr, u64 size, u64 offset)
384 {
385 return _find_next_bit_le(addr, size, offset, 0xff);
386 }
387
388 /*
389 * Hashing code adapted from ext3
390 */
391 #define DELTA 0x9E3779B9
392
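/*
 * DELTA is the TEA key-schedule constant 0x9E3779B9 (2^32 divided by the
 * golden ratio).  This is the reduced 16-round TEA mixer used by ext3's
 * directory hash; it serves only as a hash primitive here, not a cipher.
 */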
393 static void TEA_transform(unsigned int buf[4], unsigned int const in[])
394 {
395 __u32 sum = 0;
396 __u32 b0 = buf[0], b1 = buf[1];
397 __u32 a = in[0], b = in[1], c = in[2], d = in[3];
398 int n = 16;
399
400 do {
401 sum += DELTA;
402 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
403 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
404 } while (--n);
405
406 buf[0] += b0;
407 buf[1] += b1;
408
409 }
410
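/*
 * Pack up to 'num' 32-bit words of the name for TEA_transform(): the pad
 * word repeats the length byte four times, name bytes are folded in four
 * per word (earlier bytes in the more significant positions), and any
 * remaining words are filled with the pad so short names still produce a
 * full input block.
 */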
411 static void str2hashbuf(const unsigned char *msg, int len,
412 unsigned int *buf, int num)
413 {
414 unsigned pad, val;
415 int i;
416
417 pad = (__u32)len | ((__u32)len << 8);
418 pad |= pad << 16;
419
420 val = pad;
421 if (len > num * 4)
422 len = num * 4;
423 for (i = 0; i < len; i++) {
424 if ((i % 4) == 0)
425 val = pad;
426 val = msg[i] + (val << 8);
427 if ((i % 4) == 3) {
428 *buf++ = val;
429 val = pad;
430 num--;
431 }
432 }
433 if (--num >= 0)
434 *buf++ = val;
435 while (--num >= 0)
436 *buf++ = pad;
437
438 }
439
440 /**
441 * Return hash value of directory entry
442 * @param name dentry name
443  * @param len name length
444  * @return hash value on success, errno on failure
445 */
446 static f2fs_hash_t __f2fs_dentry_hash(const unsigned char *name, int len) /* Need update */
447 {
448 __u32 hash;
449 f2fs_hash_t f2fs_hash;
450 const unsigned char *p;
451 __u32 in[8], buf[4];
452
453 /* special hash codes for special dentries */
454 if ((len <= 2) && (name[0] == '.') &&
455 (name[1] == '.' || name[1] == '\0'))
456 return 0;
457
458 /* Initialize the default seed for the hash checksum functions */
459 buf[0] = 0x67452301;
460 buf[1] = 0xefcdab89;
461 buf[2] = 0x98badcfe;
462 buf[3] = 0x10325476;
463
464 p = name;
465 while (1) {
466 str2hashbuf(p, len, in, 4);
467 TEA_transform(buf, in);
468 p += 16;
469 if (len <= 16)
470 break;
471 len -= 16;
472 }
473 hash = buf[0];
474
475 f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
476 return f2fs_hash;
477 }
478
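/*
 * Public hash entry point.  On a casefolded volume the name is first
 * folded through the selected NLS table so that names differing only in
 * case land in the same hash bucket; if folding fails, the raw bytes are
 * hashed as an opaque sequence instead.
 */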
479 f2fs_hash_t f2fs_dentry_hash(int encoding, int casefolded,
480 const unsigned char *name, int len)
481 {
482 const struct f2fs_nls_table *table = f2fs_load_nls_table(encoding);
483 int r, dlen;
484 unsigned char *buff;
485
486 if (len && casefolded) {
487 buff = malloc(sizeof(char) * PATH_MAX);
488 if (!buff)
489 return -ENOMEM;
490 dlen = table->ops->casefold(table, name, len, buff, PATH_MAX);
491 if (dlen < 0) {
492 free(buff);
493 goto opaque_seq;
494 }
495 r = __f2fs_dentry_hash(buff, dlen);
496
497 free(buff);
498 return r;
499 }
500 opaque_seq:
501 return __f2fs_dentry_hash(name, len);
502 }
503
504 #define ALIGN_DOWN(addrs, size) (((addrs) / (size)) * (size))
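/*
 * For compressed regular files (F2FS_COMPR_FL) the usable address slots
 * are rounded down to a whole number of compression clusters of
 * 1 << i_log_cluster_size blocks, so a cluster never straddles the end
 * of an address array.  E.g. with 4-block clusters, DEF_ADDRS_PER_BLOCK
 * (1018) is trimmed to 1016.
 */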
505 unsigned int addrs_per_inode(struct f2fs_inode *i)
506 {
507 unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
508
509 if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
510 !(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
511 return addrs;
512 return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
513 }
514
515 unsigned int addrs_per_block(struct f2fs_inode *i)
516 {
517 if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
518 !(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
519 return DEF_ADDRS_PER_BLOCK;
520 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
521 }
522
523 /*
524 * CRC32
525 */
526 #define CRCPOLY_LE 0xedb88320
527
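/*
 * Bit-at-a-time CRC32 using the reflected polynomial 0xEDB88320.  Note
 * that f2fs does not use the conventional 0xFFFFFFFF seed or final
 * inversion: the first call is seeded with F2FS_SUPER_MAGIC and chained
 * calls reuse the previous CRC as the seed.
 */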
528 u_int32_t f2fs_cal_crc32(u_int32_t crc, void *buf, int len)
529 {
530 int i;
531 unsigned char *p = (unsigned char *)buf;
532 while (len--) {
533 crc ^= *p++;
534 for (i = 0; i < 8; i++)
535 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
536 }
537 return crc;
538 }
539
540 int f2fs_crc_valid(u_int32_t blk_crc, void *buf, int len)
541 {
542 u_int32_t cal_crc = 0;
543
544 cal_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, buf, len);
545
546 if (cal_crc != blk_crc) {
547 DBG(0,"CRC validation failed: cal_crc = %u, "
548 "blk_crc = %u buff_size = 0x%x\n",
549 cal_crc, blk_crc, len);
550 return -1;
551 }
552 return 0;
553 }
554
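/*
 * Inode checksum: the per-filesystem seed is mixed with the inode number
 * and generation, then the CRC covers the inode up to i_inode_checksum,
 * a zeroed placeholder in place of the stored checksum field, and the
 * rest of the block.
 */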
555 __u32 f2fs_inode_chksum(struct f2fs_node *node)
556 {
557 struct f2fs_inode *ri = &node->i;
558 __le32 ino = node->footer.ino;
559 __le32 gen = ri->i_generation;
560 __u32 chksum, chksum_seed;
561 __u32 dummy_cs = 0;
562 unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
563 unsigned int cs_size = sizeof(dummy_cs);
564
565 chksum = f2fs_cal_crc32(c.chksum_seed, (__u8 *)&ino,
566 sizeof(ino));
567 chksum_seed = f2fs_cal_crc32(chksum, (__u8 *)&gen, sizeof(gen));
568
569 chksum = f2fs_cal_crc32(chksum_seed, (__u8 *)ri, offset);
570 chksum = f2fs_cal_crc32(chksum, (__u8 *)&dummy_cs, cs_size);
571 offset += cs_size;
572 chksum = f2fs_cal_crc32(chksum, (__u8 *)ri + offset,
573 F2FS_BLKSIZE - offset);
574 return chksum;
575 }
576
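/*
 * Checkpoint checksum: the CRC covers the block up to checksum_offset;
 * when that offset is smaller than CP_CHKSUM_OFFSET the checksum field
 * is embedded mid-block, so the bytes following it are folded in as well.
 */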
577 __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *cp)
578 {
579 unsigned int chksum_ofs = le32_to_cpu(cp->checksum_offset);
580 __u32 chksum;
581
582 chksum = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, chksum_ofs);
583 if (chksum_ofs < CP_CHKSUM_OFFSET) {
584 chksum_ofs += sizeof(chksum);
585 chksum = f2fs_cal_crc32(chksum, (__u8 *)cp + chksum_ofs,
586 F2FS_BLKSIZE - chksum_ofs);
587 }
588 return chksum;
589 }
590
591 int write_inode(struct f2fs_node *inode, u64 blkaddr)
592 {
593 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
594 inode->i.i_inode_checksum =
595 cpu_to_le32(f2fs_inode_chksum(inode));
596 return dev_write_block(inode, blkaddr);
597 }
598
599 /*
600 * try to identify the root device
601 */
602 char *get_rootdev()
603 {
604 #if defined(ANDROID_WINDOWS_HOST) || defined(WITH_ANDROID)
605 return NULL;
606 #else
607 struct stat sb;
608 int fd, ret;
609 char buf[PATH_MAX + 1];
610 char *uevent, *ptr;
611 char *rootdev;
612
613 if (stat("/", &sb) == -1)
614 return NULL;
615
616 snprintf(buf, PATH_MAX, "/sys/dev/block/%u:%u/uevent",
617 major(sb.st_dev), minor(sb.st_dev));
618
619 fd = open(buf, O_RDONLY);
620
621 if (fd < 0)
622 return NULL;
623
624 ret = lseek(fd, (off_t)0, SEEK_END);
625 (void)lseek(fd, (off_t)0, SEEK_SET);
626
627 if (ret == -1) {
628 close(fd);
629 return NULL;
630 }
631
632 uevent = malloc(ret + 1);
633 ASSERT(uevent);
634
635 uevent[ret] = '\0';
636
637 ret = read(fd, uevent, ret);
638 close(fd);
639
640 ptr = strstr(uevent, "DEVNAME");
641 if (!ptr)
642 return NULL;
643
644 ret = sscanf(ptr, "DEVNAME=%s\n", buf);
645 if (strlen(buf) == 0)
646 return NULL;
647
648 ret = strlen(buf) + 5;
649 rootdev = malloc(ret + 1);
650 if (!rootdev)
651 return NULL;
652 rootdev[ret] = '\0';
653
654 snprintf(rootdev, ret + 1, "/dev/%s", buf);
655 return rootdev;
656 #endif
657 }
658
659 /*
660 * device information
661 */
662 void f2fs_init_configuration(void)
663 {
664 int i;
665
666 memset(&c, 0, sizeof(struct f2fs_configuration));
667 c.ndevs = 1;
668 c.sectors_per_blk = DEFAULT_SECTORS_PER_BLOCK;
669 c.blks_per_seg = DEFAULT_BLOCKS_PER_SEGMENT;
670 c.wanted_total_sectors = -1;
671 c.wanted_sector_size = -1;
672 #ifndef WITH_ANDROID
673 c.preserve_limits = 1;
674 c.no_kernel_check = 1;
675 #else
676 c.no_kernel_check = 0;
677 #endif
678
679 for (i = 0; i < MAX_DEVICES; i++) {
680 c.devices[i].fd = -1;
681 c.devices[i].sector_size = DEFAULT_SECTOR_SIZE;
682 c.devices[i].end_blkaddr = -1;
683 c.devices[i].zoned_model = F2FS_ZONED_NONE;
684 }
685
686 /* calculated by overprovision ratio */
687 c.segs_per_sec = 1;
688 c.secs_per_zone = 1;
689 c.segs_per_zone = 1;
690 c.vol_label = "";
691 c.trim = 1;
692 c.kd = -1;
693 c.fixed_time = -1;
694 c.s_encoding = 0;
695 c.s_encoding_flags = 0;
696
697 /* default root owner */
698 c.root_uid = getuid();
699 c.root_gid = getgid();
700 }
701
702 int f2fs_dev_is_writable(void)
703 {
704 return !c.ro || c.force;
705 }
706
707 #ifdef HAVE_SETMNTENT
708 static int is_mounted(const char *mpt, const char *device)
709 {
710 FILE *file = NULL;
711 struct mntent *mnt = NULL;
712
713 file = setmntent(mpt, "r");
714 if (file == NULL)
715 return 0;
716
717 while ((mnt = getmntent(file)) != NULL) {
718 if (!strcmp(device, mnt->mnt_fsname)) {
719 #ifdef MNTOPT_RO
720 if (hasmntopt(mnt, MNTOPT_RO))
721 c.ro = 1;
722 #endif
723 break;
724 }
725 }
726 endmntent(file);
727 return mnt ? 1 : 0;
728 }
729 #endif
730
731 int f2fs_dev_is_umounted(char *path)
732 {
733 #ifdef ANDROID_WINDOWS_HOST
734 return 0;
735 #else
736 struct stat *st_buf;
737 int is_rootdev = 0;
738 int ret = 0;
739 char *rootdev_name = get_rootdev();
740
741 if (rootdev_name) {
742 if (!strcmp(path, rootdev_name))
743 is_rootdev = 1;
744 free(rootdev_name);
745 }
746
747 /*
748  * try with /proc/mounts first to detect RDONLY.
749 * f2fs_stop_checkpoint makes RO in /proc/mounts while RW in /etc/mtab.
750 */
751 #ifdef __linux__
752 ret = is_mounted("/proc/mounts", path);
753 if (ret) {
754 MSG(0, "Info: Mounted device!\n");
755 return -1;
756 }
757 #endif
758 #if defined(MOUNTED) || defined(_PATH_MOUNTED)
759 #ifndef MOUNTED
760 #define MOUNTED _PATH_MOUNTED
761 #endif
762 ret = is_mounted(MOUNTED, path);
763 if (ret) {
764 MSG(0, "Info: Mounted device!\n");
765 return -1;
766 }
767 #endif
768 /*
769 * If we are supposed to operate on the root device, then
770 * also check the mounts for '/dev/root', which sometimes
771 * functions as an alias for the root device.
772 */
773 if (is_rootdev) {
774 #ifdef __linux__
775 ret = is_mounted("/proc/mounts", "/dev/root");
776 if (ret) {
777 MSG(0, "Info: Mounted device!\n");
778 return -1;
779 }
780 #endif
781 }
782
783 /*
784 * If f2fs is umounted with -l, the process can still use
785 * the file system. In this case, we should not format.
786 */
787 st_buf = malloc(sizeof(struct stat));
788 ASSERT(st_buf);
789
790 if (stat(path, st_buf) == 0 && S_ISBLK(st_buf->st_mode)) {
791 int fd = open(path, O_RDONLY | O_EXCL);
792
793 if (fd >= 0) {
794 close(fd);
795 } else if (errno == EBUSY) {
796 MSG(0, "\tError: In use by the system!\n");
797 free(st_buf);
798 return -1;
799 }
800 }
801 free(st_buf);
802 return ret;
803 #endif
804 }
805
806 int f2fs_devs_are_umounted(void)
807 {
808 int i;
809
810 for (i = 0; i < c.ndevs; i++)
811 if (f2fs_dev_is_umounted((char *)c.devices[i].path))
812 return -1;
813 return 0;
814 }
815
816 void get_kernel_version(__u8 *version)
817 {
818 int i;
819 for (i = 0; i < VERSION_LEN; i++) {
820 if (version[i] == '\n')
821 break;
822 }
823 memset(version + i, 0, VERSION_LEN + 1 - i);
824 }
825
826 void get_kernel_uname_version(__u8 *version)
827 {
828 #ifdef HAVE_SYS_UTSNAME_H
829 struct utsname buf;
830
831 memset(version, 0, VERSION_LEN);
832 if (uname(&buf))
833 return;
834
835 #if !defined(WITH_KERNEL_VERSION)
836 snprintf((char *)version,
837 VERSION_LEN, "%s %s", buf.release, buf.version);
838 #else
839 snprintf((char *)version,
840 VERSION_LEN, "%s", buf.release);
841 #endif
842 #else
843 memset(version, 0, VERSION_LEN);
844 #endif
845 }
846
847 #if defined(__linux__) && defined(_IO) && !defined(BLKGETSIZE)
848 #define BLKGETSIZE _IO(0x12,96)
849 #endif
850
851 #if defined(__linux__) && defined(_IOR) && !defined(BLKGETSIZE64)
852 #define BLKGETSIZE64 _IOR(0x12,114, size_t)
853 #endif
854
855 #if defined(__linux__) && defined(_IO) && !defined(BLKSSZGET)
856 #define BLKSSZGET _IO(0x12,104)
857 #endif
858
859 #if defined(__APPLE__)
860 #include <sys/disk.h>
861 #define BLKGETSIZE DKIOCGETBLOCKCOUNT
862 #define BLKSSZGET DKIOCGETBLOCKCOUNT
863 #endif /* APPLE_DARWIN */
864
865 #ifndef ANDROID_WINDOWS_HOST
866 static int open_check_fs(char *path, int flag)
867 {
868 if (c.func != DUMP && (c.func != FSCK || c.fix_on || c.auto_fix))
869 return -1;
870
871 /* allow to open ro */
872 return open(path, O_RDONLY | flag);
873 }
874
875 int get_device_info(int i)
876 {
877 int32_t fd = 0;
878 uint32_t sector_size;
879 #ifndef BLKGETSIZE64
880 uint32_t total_sectors;
881 #endif
882 struct stat *stat_buf;
883 #ifdef HDIO_GETGEO
884 struct hd_geometry geom;
885 #endif
886 #if !defined(WITH_ANDROID) && defined(__linux__)
887 sg_io_hdr_t io_hdr;
888 unsigned char reply_buffer[96] = {0};
889 unsigned char model_inq[6] = {MODELINQUIRY};
890 #endif
891 struct device_info *dev = c.devices + i;
892
893 if (c.sparse_mode) {
894 fd = open(dev->path, O_RDWR | O_CREAT | O_BINARY, 0644);
895 if (fd < 0) {
896 fd = open_check_fs(dev->path, O_BINARY);
897 if (fd < 0) {
898 MSG(0, "\tError: Failed to open a sparse file!\n");
899 return -1;
900 }
901 }
902 }
903
904 stat_buf = malloc(sizeof(struct stat));
905 ASSERT(stat_buf);
906
907 if (!c.sparse_mode) {
908 if (stat(dev->path, stat_buf) < 0 ) {
909 MSG(0, "\tError: Failed to get the device stat!\n");
910 free(stat_buf);
911 return -1;
912 }
913
914 if (S_ISBLK(stat_buf->st_mode) &&
915 !c.force && c.func != DUMP && !c.dry_run) {
916 fd = open(dev->path, O_RDWR | O_EXCL);
917 if (fd < 0)
918 fd = open_check_fs(dev->path, O_EXCL);
919 } else {
920 fd = open(dev->path, O_RDWR);
921 if (fd < 0)
922 fd = open_check_fs(dev->path, 0);
923 }
924 }
925 if (fd < 0) {
926 MSG(0, "\tError: Failed to open the device!\n");
927 free(stat_buf);
928 return -1;
929 }
930
931 dev->fd = fd;
932
933 if (c.sparse_mode) {
934 if (f2fs_init_sparse_file()) {
935 free(stat_buf);
936 return -1;
937 }
938 }
939
940 if (c.kd == -1) {
941 #if !defined(WITH_ANDROID) && defined(__linux__)
942 c.kd = open("/proc/version", O_RDONLY);
943 #endif
944 if (c.kd < 0) {
945 MSG(0, "\tInfo: No support kernel version!\n");
946 c.kd = -2;
947 }
948 }
949
950 if (c.sparse_mode) {
951 dev->total_sectors = c.device_size / dev->sector_size;
952 } else if (S_ISREG(stat_buf->st_mode)) {
953 dev->total_sectors = stat_buf->st_size / dev->sector_size;
954 } else if (S_ISBLK(stat_buf->st_mode)) {
955 #ifdef BLKSSZGET
956 if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
957 MSG(0, "\tError: Using the default sector size\n");
958 else if (dev->sector_size < sector_size)
959 dev->sector_size = sector_size;
960 #endif
961 #ifdef BLKGETSIZE64
962 if (ioctl(fd, BLKGETSIZE64, &dev->total_sectors) < 0) {
963 MSG(0, "\tError: Cannot get the device size\n");
964 free(stat_buf);
965 return -1;
966 }
967 #else
968 if (ioctl(fd, BLKGETSIZE, &total_sectors) < 0) {
969 MSG(0, "\tError: Cannot get the device size\n");
970 free(stat_buf);
971 return -1;
972 }
973 dev->total_sectors = total_sectors;
974 #endif
975 dev->total_sectors /= dev->sector_size;
976
977 if (i == 0) {
978 #ifdef HDIO_GETGEO
979 if (ioctl(fd, HDIO_GETGEO, &geom) < 0)
980 c.start_sector = 0;
981 else
982 c.start_sector = geom.start;
983 #else
984 c.start_sector = 0;
985 #endif
986 }
987
988 #if !defined(WITH_ANDROID) && defined(__linux__)
989 /* Send INQUIRY command */
990 memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
991 io_hdr.interface_id = 'S';
992 io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
993 io_hdr.dxfer_len = sizeof(reply_buffer);
994 io_hdr.dxferp = reply_buffer;
995 io_hdr.cmd_len = sizeof(model_inq);
996 io_hdr.cmdp = model_inq;
997 io_hdr.timeout = 1000;
998
999 if (!ioctl(fd, SG_IO, &io_hdr)) {
1000 MSG(0, "Info: [%s] Disk Model: %.16s\n",
1001 dev->path, reply_buffer+16);
1002 }
1003 #endif
1004 } else {
1005 MSG(0, "\tError: Volume type is not supported!!!\n");
1006 free(stat_buf);
1007 return -1;
1008 }
1009
1010 if (!c.sector_size) {
1011 c.sector_size = dev->sector_size;
1012 c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
1013 } else if (c.sector_size != c.devices[i].sector_size) {
1014 MSG(0, "\tError: Different sector sizes!!!\n");
1015 free(stat_buf);
1016 return -1;
1017 }
1018
1019 #if !defined(WITH_ANDROID) && defined(__linux__)
1020 if (S_ISBLK(stat_buf->st_mode)) {
1021 if (f2fs_get_zoned_model(i) < 0) {
1022 free(stat_buf);
1023 return -1;
1024 }
1025 }
1026
1027 if (dev->zoned_model != F2FS_ZONED_NONE) {
1028
1029 /* Get the number of blocks per zones */
1030 if (f2fs_get_zone_blocks(i)) {
1031 MSG(0, "\tError: Failed to get number of blocks per zone\n");
1032 free(stat_buf);
1033 return -1;
1034 }
1035
1036 /*
1037 * Check zone configuration: for the first disk of a
1038 * multi-device volume, conventional zones are needed.
1039 */
1040 if (f2fs_check_zones(i)) {
1041 MSG(0, "\tError: Failed to check zone configuration\n");
1042 free(stat_buf);
1043 return -1;
1044 }
1045 MSG(0, "Info: Host-%s zoned block device:\n",
1046 (dev->zoned_model == F2FS_ZONED_HA) ?
1047 "aware" : "managed");
1048 MSG(0, " %u zones, %u randomly writeable zones\n",
1049 dev->nr_zones, dev->nr_rnd_zones);
1050 MSG(0, " %lu blocks per zone\n",
1051 dev->zone_blocks);
1052 }
1053 #endif
1054 /* adjust wanted_total_sectors */
1055 if (c.wanted_total_sectors != -1) {
1056 MSG(0, "Info: wanted sectors = %"PRIu64" (in %"PRIu64" bytes)\n",
1057 c.wanted_total_sectors, c.wanted_sector_size);
1058 if (c.wanted_sector_size == -1) {
1059 c.wanted_sector_size = dev->sector_size;
1060 } else if (dev->sector_size != c.wanted_sector_size) {
1061 c.wanted_total_sectors *= c.wanted_sector_size;
1062 c.wanted_total_sectors /= dev->sector_size;
1063 }
1064 }
1065
1066 c.total_sectors += dev->total_sectors;
1067 free(stat_buf);
1068 return 0;
1069 }
1070
1071 #else
1072
1073 #include "windows.h"
1074 #include "winioctl.h"
1075
1076 #if (_WIN32_WINNT >= 0x0500)
1077 #define HAVE_GET_FILE_SIZE_EX 1
1078 #endif
1079
1080 static int win_get_device_size(const char *file, uint64_t *device_size)
1081 {
1082 HANDLE dev;
1083 PARTITION_INFORMATION pi;
1084 DISK_GEOMETRY gi;
1085 DWORD retbytes;
1086 #ifdef HAVE_GET_FILE_SIZE_EX
1087 LARGE_INTEGER filesize;
1088 #else
1089 DWORD filesize;
1090 #endif /* HAVE_GET_FILE_SIZE_EX */
1091
1092 dev = CreateFile(file, GENERIC_READ,
1093 FILE_SHARE_READ | FILE_SHARE_WRITE ,
1094 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
1095
1096 if (dev == INVALID_HANDLE_VALUE)
1097 return EBADF;
1098 if (DeviceIoControl(dev, IOCTL_DISK_GET_PARTITION_INFO,
1099 &pi, sizeof(PARTITION_INFORMATION),
1100 &pi, sizeof(PARTITION_INFORMATION),
1101 &retbytes, NULL)) {
1102
1103 *device_size = pi.PartitionLength.QuadPart;
1104
1105 } else if (DeviceIoControl(dev, IOCTL_DISK_GET_DRIVE_GEOMETRY,
1106 &gi, sizeof(DISK_GEOMETRY),
1107 &gi, sizeof(DISK_GEOMETRY),
1108 &retbytes, NULL)) {
1109
1110 *device_size = gi.BytesPerSector *
1111 gi.SectorsPerTrack *
1112 gi.TracksPerCylinder *
1113 gi.Cylinders.QuadPart;
1114
1115 #ifdef HAVE_GET_FILE_SIZE_EX
1116 } else if (GetFileSizeEx(dev, &filesize)) {
1117 *device_size = filesize.QuadPart;
1118 }
1119 #else
1120 } else {
1121 filesize = GetFileSize(dev, NULL);
1122 if (INVALID_FILE_SIZE != filesize)
1123 return -1;
1124 *device_size = filesize;
1125 }
1126 #endif /* HAVE_GET_FILE_SIZE_EX */
1127
1128 CloseHandle(dev);
1129 return 0;
1130 }
1131
1132 int get_device_info(int i)
1133 {
1134 struct device_info *dev = c.devices + i;
1135 uint64_t device_size = 0;
1136 int32_t fd = 0;
1137
1138 /* Block device target is not supported on Windows. */
1139 if (!c.sparse_mode) {
1140 if (win_get_device_size(dev->path, &device_size)) {
1141 MSG(0, "\tError: Failed to get device size!\n");
1142 return -1;
1143 }
1144 } else {
1145 device_size = c.device_size;
1146 }
1147 if (c.sparse_mode) {
1148 fd = open((char *)dev->path, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
1149 } else {
1150 fd = open((char *)dev->path, O_RDWR | O_BINARY);
1151 }
1152 if (fd < 0) {
1153 MSG(0, "\tError: Failed to open the device!\n");
1154 return -1;
1155 }
1156 dev->fd = fd;
1157 dev->total_sectors = device_size / dev->sector_size;
1158 c.start_sector = 0;
1159 c.sector_size = dev->sector_size;
1160 c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
1161 c.total_sectors += dev->total_sectors;
1162
1163 if (c.sparse_mode && f2fs_init_sparse_file())
1164 return -1;
1165 return 0;
1166 }
1167 #endif
1168
1169 int f2fs_get_device_info(void)
1170 {
1171 int i;
1172
1173 for (i = 0; i < c.ndevs; i++)
1174 if (get_device_info(i))
1175 return -1;
1176
1177 if (c.wanted_total_sectors < c.total_sectors) {
1178 MSG(0, "Info: total device sectors = %"PRIu64" (in %u bytes)\n",
1179 c.total_sectors, c.sector_size);
1180 c.total_sectors = c.wanted_total_sectors;
1181 c.devices[0].total_sectors = c.total_sectors;
1182 }
1183 if (c.total_sectors * c.sector_size >
1184 (u_int64_t)F2FS_MAX_SEGMENT * 2 * 1024 * 1024) {
1185 MSG(0, "\tError: F2FS can support 16TB at most!!!\n");
1186 return -1;
1187 }
1188
1189 /*
1190 * Check device types and determine the final volume operation mode:
1191 * - If all devices are regular block devices, default operation.
1192 * - If at least one HM device is found, operate in HM mode (BLKZONED
1193 * feature will be enabled by mkfs).
1194 * - If an HA device is found, let mkfs decide based on the -m option
1195 * setting by the user.
1196 */
1197 c.zoned_model = F2FS_ZONED_NONE;
1198 for (i = 0; i < c.ndevs; i++) {
1199 switch (c.devices[i].zoned_model) {
1200 case F2FS_ZONED_NONE:
1201 continue;
1202 case F2FS_ZONED_HM:
1203 c.zoned_model = F2FS_ZONED_HM;
1204 break;
1205 case F2FS_ZONED_HA:
1206 if (c.zoned_model != F2FS_ZONED_HM)
1207 c.zoned_model = F2FS_ZONED_HA;
1208 break;
1209 }
1210 }
1211
1212 if (c.zoned_model != F2FS_ZONED_NONE) {
1213
1214 /*
1215 * For zoned model, the zones sizes of all zoned devices must
1216 * be equal.
1217 */
1218 for (i = 0; i < c.ndevs; i++) {
1219 if (c.devices[i].zoned_model == F2FS_ZONED_NONE)
1220 continue;
1221 if (c.zone_blocks &&
1222 c.zone_blocks != c.devices[i].zone_blocks) {
1223 MSG(0, "\tError: zones of different size are "
1224 "not supported\n");
1225 return -1;
1226 }
1227 c.zone_blocks = c.devices[i].zone_blocks;
1228 }
1229
1230 /*
1231 * Align sections to the device zone size and align F2FS zones
1232 * to the device zones. For F2FS_ZONED_HA model without the
1233 * BLKZONED feature set at format time, this is only an
1234 * optimization as sequential writes will not be enforced.
1235 */
1236 c.segs_per_sec = c.zone_blocks / DEFAULT_BLOCKS_PER_SEGMENT;
1237 c.secs_per_zone = 1;
1238 } else {
1239 if(c.zoned_mode != 0) {
1240 MSG(0, "\n Error: %s may not be a zoned block device \n",
1241 c.devices[0].path);
1242 return -1;
1243 }
1244 }
1245
1246 c.segs_per_zone = c.segs_per_sec * c.secs_per_zone;
1247
1248 MSG(0, "Info: Segments per section = %d\n", c.segs_per_sec);
1249 MSG(0, "Info: Sections per zone = %d\n", c.secs_per_zone);
1250 MSG(0, "Info: sector size = %u\n", c.sector_size);
1251 MSG(0, "Info: total sectors = %"PRIu64" (%"PRIu64" MB)\n",
1252 c.total_sectors, (c.total_sectors *
1253 (c.sector_size >> 9)) >> 11);
1254 return 0;
1255 }
1256
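/*
 * Size of the used portion of the extra inode area: each enabled feature
 * extends the size up to the offset of the first field it does not need
 * (i_projid -> i_inode_checksum -> i_crtime -> i_compr_blocks ->
 * i_extra_end), and the result is made relative to
 * F2FS_EXTRA_ISIZE_OFFSET to match what is stored in i_extra_isize.
 */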
1257 unsigned int calc_extra_isize(void)
1258 {
1259 unsigned int size = offsetof(struct f2fs_inode, i_projid);
1260
1261 if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
1262 size = offsetof(struct f2fs_inode, i_projid);
1263 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1264 size = offsetof(struct f2fs_inode, i_inode_checksum);
1265 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
1266 size = offsetof(struct f2fs_inode, i_crtime);
1267 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
1268 size = offsetof(struct f2fs_inode, i_compr_blocks);
1269 if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
1270 size = offsetof(struct f2fs_inode, i_extra_end);
1271
1272 return size - F2FS_EXTRA_ISIZE_OFFSET;
1273 }
1274
1275 #define ARRAY_SIZE(array) \
1276 (sizeof(array) / sizeof(array[0]))
1277
1278 static const struct {
1279 char *name;
1280 __u16 encoding_magic;
1281 __u16 default_flags;
1282
1283 } f2fs_encoding_map[] = {
1284 {
1285 .encoding_magic = F2FS_ENC_UTF8_12_1,
1286 .name = "utf8",
1287 .default_flags = 0,
1288 },
1289 };
1290
1291 static const struct enc_flags {
1292 __u16 flag;
1293 char *param;
1294 } encoding_flags[] = {
1295 { F2FS_ENC_STRICT_MODE_FL, "strict" },
1296 };
1297
1298 /* Return a positive number < 0xff indicating the encoding magic number
1299 * or a negative value indicating error. */
1300 int f2fs_str2encoding(const char *string)
1301 {
1302 int i;
1303
1304 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1305 if (!strcmp(string, f2fs_encoding_map[i].name))
1306 return f2fs_encoding_map[i].encoding_magic;
1307
1308 return -EINVAL;
1309 }
1310
1311 char *f2fs_encoding2str(const int encoding)
1312 {
1313 int i;
1314
1315 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1316 if (f2fs_encoding_map[i].encoding_magic == encoding)
1317 return f2fs_encoding_map[i].name;
1318
1319 return NULL;
1320 }
1321
1322 int f2fs_get_encoding_flags(int encoding)
1323 {
1324 int i;
1325
1326 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1327 if (f2fs_encoding_map[i].encoding_magic == encoding)
1328 return f2fs_encoding_map[i].default_flags;
1329
1330 return 0;
1331 }
1332
1333 int f2fs_str2encoding_flags(char **param, __u16 *flags)
1334 {
1335 char *f = strtok(*param, ",");
1336 const struct enc_flags *fl;
1337 int i, neg = 0;
1338
1339 while (f) {
1340 neg = 0;
1341 if (!strncmp("no", f, 2)) {
1342 neg = 1;
1343 f += 2;
1344 }
1345
1346 for (i = 0; i < ARRAY_SIZE(encoding_flags); i++) {
1347 fl = &encoding_flags[i];
1348 if (!strcmp(fl->param, f)) {
1349 if (neg) {
1350 MSG(0, "Sub %s\n", fl->param);
1351 *flags &= ~fl->flag;
1352 } else {
1353 MSG(0, "Add %s\n", fl->param);
1354 *flags |= fl->flag;
1355 }
1356
1357 goto next_flag;
1358 }
1359 }
1360 *param = f;
1361 return -EINVAL;
1362 next_flag:
1363 f = strtok(NULL, ",");
1364 }
1365 return 0;
1366 }
1367