/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif
#define _FILE_OFFSET_BITS 64

#include <f2fs_fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <libgen.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#include <sys/stat.h>
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_SYSMACROS_H
#include <sys/sysmacros.h>
#endif
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif
#ifdef HAVE_SCSI_SG_H
#include <scsi/sg.h>
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif
#ifdef HAVE_LINUX_LIMITS_H
#include <linux/limits.h>
#endif

/* SCSI command for standard inquiry */
#define MODELINQUIRY 0x12,0x00,0x00,0x00,0x4A,0x00

#ifndef _WIN32 /* O_BINARY is windows-specific flag */
#define O_BINARY 0
#else
/* On Windows, wchar_t is 16-bit sized and it causes compilation errors. */
#define wchar_t int
#endif

/*
 * UTF conversion codes are copied from exfat tools.
 */
static const char *utf8_to_wchar(const char *input, wchar_t *wc,
		size_t insize)
{
	if ((input[0] & 0x80) == 0 && insize >= 1) {
		*wc = (wchar_t) input[0];
		return input + 1;
	}
	if ((input[0] & 0xe0) == 0xc0 && insize >= 2) {
		*wc = (((wchar_t) input[0] & 0x1f) << 6) |
			((wchar_t) input[1] & 0x3f);
		return input + 2;
	}
	if ((input[0] & 0xf0) == 0xe0 && insize >= 3) {
		*wc = (((wchar_t) input[0] & 0x0f) << 12) |
			(((wchar_t) input[1] & 0x3f) << 6) |
			((wchar_t) input[2] & 0x3f);
		return input + 3;
	}
	if ((input[0] & 0xf8) == 0xf0 && insize >= 4) {
		*wc = (((wchar_t) input[0] & 0x07) << 18) |
			(((wchar_t) input[1] & 0x3f) << 12) |
			(((wchar_t) input[2] & 0x3f) << 6) |
			((wchar_t) input[3] & 0x3f);
		return input + 4;
	}
	if ((input[0] & 0xfc) == 0xf8 && insize >= 5) {
		*wc = (((wchar_t) input[0] & 0x03) << 24) |
			(((wchar_t) input[1] & 0x3f) << 18) |
			(((wchar_t) input[2] & 0x3f) << 12) |
			(((wchar_t) input[3] & 0x3f) << 6) |
			((wchar_t) input[4] & 0x3f);
		return input + 5;
	}
	if ((input[0] & 0xfe) == 0xfc && insize >= 6) {
		*wc = (((wchar_t) input[0] & 0x01) << 30) |
			(((wchar_t) input[1] & 0x3f) << 24) |
			(((wchar_t) input[2] & 0x3f) << 18) |
			(((wchar_t) input[3] & 0x3f) << 12) |
			(((wchar_t) input[4] & 0x3f) << 6) |
			((wchar_t) input[5] & 0x3f);
		return input + 6;
	}
	return NULL;
}

static uint16_t *wchar_to_utf16(uint16_t *output, wchar_t wc, size_t outsize)
{
	if (wc <= 0xffff) {
		if (outsize == 0)
			return NULL;
		output[0] = cpu_to_le16(wc);
		return output + 1;
	}
	if (outsize < 2)
		return NULL;
	wc -= 0x10000;
	output[0] = cpu_to_le16(0xd800 | ((wc >> 10) & 0x3ff));
	output[1] = cpu_to_le16(0xdc00 | (wc & 0x3ff));
	return output + 2;
}

int utf8_to_utf16(uint16_t *output, const char *input, size_t outsize,
		size_t insize)
{
	const char *inp = input;
	uint16_t *outp = output;
	wchar_t wc;

	while ((size_t)(inp - input) < insize && *inp) {
		inp = utf8_to_wchar(inp, &wc, insize - (inp - input));
		if (inp == NULL) {
			DBG(0, "illegal UTF-8 sequence\n");
			return -EILSEQ;
		}
		outp = wchar_to_utf16(outp, wc, outsize - (outp - output));
		if (outp == NULL) {
			DBG(0, "name is too long\n");
			return -ENAMETOOLONG;
		}
	}
	*outp = cpu_to_le16(0);
	return 0;
}

static const uint16_t *utf16_to_wchar(const uint16_t *input, wchar_t *wc,
		size_t insize)
{
	if ((le16_to_cpu(input[0]) & 0xfc00) == 0xd800) {
		if (insize < 2 || (le16_to_cpu(input[1]) & 0xfc00) != 0xdc00)
			return NULL;
		*wc = ((wchar_t) (le16_to_cpu(input[0]) & 0x3ff) << 10);
		*wc |= (le16_to_cpu(input[1]) & 0x3ff);
		*wc += 0x10000;
		return input + 2;
	} else {
		*wc = le16_to_cpu(*input);
		return input + 1;
	}
}

static char *wchar_to_utf8(char *output, wchar_t wc, size_t outsize)
{
	if (wc <= 0x7f) {
		if (outsize < 1)
			return NULL;
		*output++ = (char) wc;
	} else if (wc <= 0x7ff) {
		if (outsize < 2)
			return NULL;
		*output++ = 0xc0 | (wc >> 6);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0xffff) {
		if (outsize < 3)
			return NULL;
		*output++ = 0xe0 | (wc >> 12);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x1fffff) {
		if (outsize < 4)
			return NULL;
		*output++ = 0xf0 | (wc >> 18);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x3ffffff) {
		if (outsize < 5)
			return NULL;
		*output++ = 0xf8 | (wc >> 24);
		*output++ = 0x80 | ((wc >> 18) & 0x3f);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x7fffffff) {
		if (outsize < 6)
			return NULL;
		*output++ = 0xfc | (wc >> 30);
		*output++ = 0x80 | ((wc >> 24) & 0x3f);
		*output++ = 0x80 | ((wc >> 18) & 0x3f);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else
		return NULL;

	return output;
}

int utf16_to_utf8(char *output, const uint16_t *input, size_t outsize,
		size_t insize)
{
	const uint16_t *inp = input;
	char *outp = output;
	wchar_t wc;

	while ((size_t)(inp - input) < insize && le16_to_cpu(*inp)) {
		inp = utf16_to_wchar(inp, &wc, insize - (inp - input));
		if (inp == NULL) {
			DBG(0, "illegal UTF-16 sequence\n");
			return -EILSEQ;
		}
		outp = wchar_to_utf8(outp, wc, outsize - (outp - output));
		if (outp == NULL) {
			DBG(0, "name is too long\n");
			return -ENAMETOOLONG;
		}
	}
	*outp = '\0';
	return 0;
}
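
/*
 * Example (illustrative only): round-tripping an ASCII label through the
 * on-disk UTF-16LE form. The buffer sizes below are arbitrary choices for
 * the example, not values required by the API.
 *
 *	uint16_t utf16_label[16];
 *	char utf8_label[64];
 *
 *	if (!utf8_to_utf16(utf16_label, "f2fs", 16, strlen("f2fs")))
 *		utf16_to_utf8(utf8_label, utf16_label, 64, 16);
 *
 * Both helpers return 0 on success and a negative errno (-EILSEQ for a
 * malformed sequence, -ENAMETOOLONG when the output buffer is too small).
 */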

int log_base_2(uint32_t num)
{
	int ret = 0;
	if (num <= 0 || (num & (num - 1)) != 0)
		return -1;

	while (num >>= 1)
		ret++;
	return ret;
}
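
/*
 * For example, log_base_2(4096) returns 12, while any value that is not an
 * exact power of two (or is zero) yields -1.
 */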

/*
 * f2fs bit operations
 */
static const int bits_in_byte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};

int get_bits_in_byte(unsigned char n)
{
	return bits_in_byte[n];
}

int test_and_set_bit_le(u32 nr, u8 *addr)
{
	int mask, retval;

	addr += nr >> 3;
	mask = 1 << ((nr & 0x07));
	retval = mask & *addr;
	*addr |= mask;
	return retval;
}

int test_and_clear_bit_le(u32 nr, u8 *addr)
{
	int mask, retval;

	addr += nr >> 3;
	mask = 1 << ((nr & 0x07));
	retval = mask & *addr;
	*addr &= ~mask;
	return retval;
}

int test_bit_le(u32 nr, const u8 *addr)
{
	return ((1 << (nr & 7)) & (addr[nr >> 3]));
}

int f2fs_test_bit(unsigned int nr, const char *p)
{
	int mask;
	char *addr = (char *)p;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return (mask & *addr) != 0;
}

int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
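
/*
 * Note on bit numbering: the *_le helpers above address bit 0 as the least
 * significant bit of a byte (mask 0x01), while f2fs_test_bit, f2fs_set_bit
 * and f2fs_clear_bit address bit 0 as the most significant bit (mask 0x80).
 * For example, starting from a zeroed byte:
 *
 *	u8 b = 0;
 *	test_and_set_bit_le(0, &b);	-> b is now 0x01
 *	f2fs_set_bit(0, (char *)&b);	-> b is now 0x81
 */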

static inline u64 __ffs(u8 word)
{
	int num = 0;

	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}
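
/*
 * __ffs() returns the index of the least significant set bit in a byte, e.g.
 * __ffs(0x28) == 3. The result is meaningless for word == 0, so the caller
 * below only evaluates it for a non-zero byte.
 */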

/* Copied from linux/lib/find_bit.c */
#define BITMAP_FIRST_BYTE_MASK(start) (0xff << ((start) & (BITS_PER_BYTE - 1)))

static u64 _find_next_bit_le(const u8 *addr, u64 nbits, u64 start, char invert)
{
	u8 tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_BYTE] ^ invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_BYTE_MASK(start);
	start = round_down(start, BITS_PER_BYTE);

	while (!tmp) {
		start += BITS_PER_BYTE;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_BYTE] ^ invert;
	}

	return min(start + __ffs(tmp), nbits);
}

u64 find_next_bit_le(const u8 *addr, u64 size, u64 offset)
{
	return _find_next_bit_le(addr, size, offset, 0);
}


u64 find_next_zero_bit_le(const u8 *addr, u64 size, u64 offset)
{
	return _find_next_bit_le(addr, size, offset, 0xff);
}
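
/*
 * A typical caller scans a bitmap bit by bit; a minimal sketch (with a
 * hypothetical 'bitmap' of 'nbits' valid bits) looks like:
 *
 *	u64 off = 0;
 *
 *	while ((off = find_next_bit_le(bitmap, nbits, off)) < nbits) {
 *		... bit 'off' is set ...
 *		off++;
 *	}
 *
 * Both wrappers return 'size' when no matching bit exists at or beyond
 * 'offset'.
 */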

/*
 * Hashing code adapted from ext3
 */
#define DELTA 0x9E3779B9

static void TEA_transform(unsigned int buf[4], unsigned int const in[])
{
	__u32 sum = 0;
	__u32 b0 = buf[0], b1 = buf[1];
	__u32 a = in[0], b = in[1], c = in[2], d = in[3];
	int n = 16;

	do {
		sum += DELTA;
		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
	} while (--n);

	buf[0] += b0;
	buf[1] += b1;

}

static void str2hashbuf(const unsigned char *msg, int len,
		unsigned int *buf, int num)
{
	unsigned pad, val;
	int i;

	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num * 4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = msg[i] + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;

}

/**
 * Return hash value of directory entry
 * @param name	dentry name
 * @param len	name length
 * @return	return on success hash value, errno on failure
 */
static f2fs_hash_t __f2fs_dentry_hash(const unsigned char *name, int len)/* Need update */
{
	__u32 hash;
	f2fs_hash_t f2fs_hash;
	const unsigned char *p;
	__u32 in[8], buf[4];

	/* special hash codes for special dentries */
	if ((len <= 2) && (name[0] == '.') &&
		(name[1] == '.' || name[1] == '\0'))
		return 0;

	/* Initialize the default seed for the hash checksum functions */
	buf[0] = 0x67452301;
	buf[1] = 0xefcdab89;
	buf[2] = 0x98badcfe;
	buf[3] = 0x10325476;

	p = name;
	while (1) {
		str2hashbuf(p, len, in, 4);
		TEA_transform(buf, in);
		p += 16;
		if (len <= 16)
			break;
		len -= 16;
	}
	hash = buf[0];

	f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
	return f2fs_hash;
}
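
/*
 * The hash consumes the name in 16-byte chunks: str2hashbuf() packs each
 * chunk, padded with a byte pattern derived from the length, into four
 * 32-bit words, TEA_transform() folds those words into the running 4-word
 * state, and the first state word with the collision marker bit cleared
 * becomes the dentry hash. "." and ".." always hash to 0.
 */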

f2fs_hash_t f2fs_dentry_hash(int encoding, int casefolded,
		const unsigned char *name, int len)
{
	const struct f2fs_nls_table *table = f2fs_load_nls_table(encoding);
	int r, dlen;
	unsigned char *buff;

	if (len && casefolded) {
		buff = malloc(sizeof(char) * PATH_MAX);
		if (!buff)
			return -ENOMEM;
		dlen = table->ops->casefold(table, name, len, buff, PATH_MAX);
		if (dlen < 0) {
			free(buff);
			goto opaque_seq;
		}
		r = __f2fs_dentry_hash(buff, dlen);

		free(buff);
		return r;
	}
opaque_seq:
	return __f2fs_dentry_hash(name, len);
}

unsigned int addrs_per_inode(struct f2fs_inode *i)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);

	if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
			!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
		return addrs;
	return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
}

unsigned int addrs_per_block(struct f2fs_inode *i)
{
	if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
			!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
}

unsigned int f2fs_max_file_offset(struct f2fs_inode *i)
{
	if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
			!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
		return le64_to_cpu(i->i_size);
	return ALIGN_UP(le64_to_cpu(i->i_size), 1 << i->i_log_cluster_size);
}
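
/*
 * For compressed regular files (F2FS_COMPR_FL set on a regular inode) the
 * three helpers above align their results to the compression cluster: with
 * i_log_cluster_size == 2, for example, addrs_per_inode() and
 * addrs_per_block() are rounded down to a multiple of 4 block addresses,
 * while f2fs_max_file_offset() rounds i_size up to a multiple of 4. Other
 * inodes get the unaligned values.
 */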

/*
 * CRC32
 */
#define CRCPOLY_LE 0xedb88320

uint32_t f2fs_cal_crc32(uint32_t crc, void *buf, int len)
{
	int i;
	unsigned char *p = (unsigned char *)buf;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

int f2fs_crc_valid(uint32_t blk_crc, void *buf, int len)
{
	uint32_t cal_crc = 0;

	cal_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, buf, len);

	if (cal_crc != blk_crc) {
		DBG(0, "CRC validation failed: cal_crc = %u, "
			"blk_crc = %u buff_size = 0x%x\n",
			cal_crc, blk_crc, len);
		return -1;
	}
	return 0;
}
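
/*
 * This is the bitwise (one byte at a time) little-endian CRC32 with the
 * reflected polynomial 0xedb88320. Note that f2fs_crc_valid() seeds it with
 * F2FS_SUPER_MAGIC rather than the conventional ~0, so the result is not
 * interchangeable with a stock crc32() of the same buffer.
 */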

__u32 f2fs_inode_chksum(struct f2fs_node *node)
{
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_cal_crc32(c.chksum_seed, (__u8 *)&ino,
			sizeof(ino));
	chksum_seed = f2fs_cal_crc32(chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_cal_crc32(chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_cal_crc32(chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_cal_crc32(chksum, (__u8 *)ri + offset,
			F2FS_BLKSIZE - offset);
	return chksum;
}
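
/*
 * The inode checksum chains the per-filesystem chksum_seed, the inode number
 * and generation, and then the inode block itself with the i_inode_checksum
 * field replaced by zero, so the stored checksum never covers itself.
 */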

__u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *cp)
{
	unsigned int chksum_ofs = le32_to_cpu(cp->checksum_offset);
	__u32 chksum;

	chksum = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_cal_crc32(chksum, (__u8 *)cp + chksum_ofs,
				F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

int write_inode(struct f2fs_node *inode, u64 blkaddr)
{
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
		inode->i.i_inode_checksum =
			cpu_to_le32(f2fs_inode_chksum(inode));
	return dev_write_block(inode, blkaddr);
}

/*
 * try to identify the root device
 */
char *get_rootdev()
{
#if defined(_WIN32) || defined(WITH_ANDROID)
	return NULL;
#else
	struct stat sb;
	int fd, ret;
	char buf[PATH_MAX + 1];
	char *uevent, *ptr;
	char *rootdev;

	if (stat("/", &sb) == -1)
		return NULL;

	snprintf(buf, PATH_MAX, "/sys/dev/block/%u:%u/uevent",
		major(sb.st_dev), minor(sb.st_dev));

	fd = open(buf, O_RDONLY);

	if (fd < 0)
		return NULL;

	ret = lseek(fd, (off_t)0, SEEK_END);
	(void)lseek(fd, (off_t)0, SEEK_SET);

	if (ret == -1) {
		close(fd);
		return NULL;
	}

	uevent = malloc(ret + 1);
	ASSERT(uevent);

	uevent[ret] = '\0';

	ret = read(fd, uevent, ret);
	close(fd);

	ptr = strstr(uevent, "DEVNAME");
	if (!ptr)
		goto out_free;

	ret = sscanf(ptr, "DEVNAME=%s\n", buf);
	if (strlen(buf) == 0)
		goto out_free;

	ret = strlen(buf) + 5;
	rootdev = malloc(ret + 1);
	if (!rootdev)
		goto out_free;
	rootdev[ret] = '\0';

	snprintf(rootdev, ret + 1, "/dev/%s", buf);
	free(uevent);
	return rootdev;

out_free:
	free(uevent);
	return NULL;
#endif
}

/*
 * device information
 */
void f2fs_init_configuration(void)
{
	int i;

	memset(&c, 0, sizeof(struct f2fs_configuration));
	c.ndevs = 1;
	c.sectors_per_blk = DEFAULT_SECTORS_PER_BLOCK;
	c.blks_per_seg = DEFAULT_BLOCKS_PER_SEGMENT;
	c.wanted_total_sectors = -1;
	c.wanted_sector_size = -1;
#ifndef WITH_ANDROID
	c.preserve_limits = 1;
	c.no_kernel_check = 1;
#else
	c.no_kernel_check = 0;
#endif

	for (i = 0; i < MAX_DEVICES; i++) {
		c.devices[i].fd = -1;
		c.devices[i].sector_size = DEFAULT_SECTOR_SIZE;
		c.devices[i].end_blkaddr = -1;
		c.devices[i].zoned_model = F2FS_ZONED_NONE;
	}

	/* calculated by overprovision ratio */
	c.segs_per_sec = 1;
	c.secs_per_zone = 1;
	c.segs_per_zone = 1;
	c.vol_label = "";
	c.trim = 1;
	c.kd = -1;
	c.fixed_time = -1;
	c.s_encoding = 0;
	c.s_encoding_flags = 0;

	/* default root owner */
	c.root_uid = getuid();
	c.root_gid = getgid();
}

int f2fs_dev_is_writable(void)
{
	return !c.ro || c.force;
}

#ifdef HAVE_SETMNTENT
static int is_mounted(const char *mpt, const char *device)
{
	FILE *file = NULL;
	struct mntent *mnt = NULL;

	file = setmntent(mpt, "r");
	if (file == NULL)
		return 0;

	while ((mnt = getmntent(file)) != NULL) {
		if (!strcmp(device, mnt->mnt_fsname)) {
#ifdef MNTOPT_RO
			if (hasmntopt(mnt, MNTOPT_RO))
				c.ro = 1;
#endif
			break;
		}
	}
	endmntent(file);
	return mnt ? 1 : 0;
}
#endif

int f2fs_dev_is_umounted(char *path)
{
#ifdef _WIN32
	return 0;
#else
	struct stat *st_buf;
	int is_rootdev = 0;
	int ret = 0;
	char *rootdev_name = get_rootdev();

	if (rootdev_name) {
		if (!strcmp(path, rootdev_name))
			is_rootdev = 1;
		free(rootdev_name);
	}

	/*
	 * try with /proc/mounts first to detect RDONLY.
	 * f2fs_stop_checkpoint makes RO in /proc/mounts while RW in /etc/mtab.
	 */
#ifdef __linux__
	ret = is_mounted("/proc/mounts", path);
	if (ret) {
		MSG(0, "Info: Mounted device!\n");
		return -1;
	}
#endif
#if defined(MOUNTED) || defined(_PATH_MOUNTED)
#ifndef MOUNTED
#define MOUNTED _PATH_MOUNTED
#endif
	ret = is_mounted(MOUNTED, path);
	if (ret) {
		MSG(0, "Info: Mounted device!\n");
		return -1;
	}
#endif
	/*
	 * If we are supposed to operate on the root device, then
	 * also check the mounts for '/dev/root', which sometimes
	 * functions as an alias for the root device.
	 */
	if (is_rootdev) {
#ifdef __linux__
		ret = is_mounted("/proc/mounts", "/dev/root");
		if (ret) {
			MSG(0, "Info: Mounted device!\n");
			return -1;
		}
#endif
	}

	/*
	 * If f2fs is umounted with -l, the process can still use
	 * the file system. In this case, we should not format.
	 */
	st_buf = malloc(sizeof(struct stat));
	ASSERT(st_buf);

	if (stat(path, st_buf) == 0 && S_ISBLK(st_buf->st_mode)) {
		int fd = open(path, O_RDONLY | O_EXCL);

		if (fd >= 0) {
			close(fd);
		} else if (errno == EBUSY) {
			MSG(0, "\tError: In use by the system!\n");
			free(st_buf);
			return -1;
		}
	}
	free(st_buf);
	return ret;
#endif
}

int f2fs_devs_are_umounted(void)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (f2fs_dev_is_umounted((char *)c.devices[i].path))
			return -1;
	return 0;
}

void get_kernel_version(__u8 *version)
{
	int i;
	for (i = 0; i < VERSION_NAME_LEN; i++) {
		if (version[i] == '\n')
			break;
	}
	memset(version + i, 0, VERSION_LEN + 1 - i);
}

void get_kernel_uname_version(__u8 *version)
{
#ifdef HAVE_SYS_UTSNAME_H
	struct utsname buf;

	memset(version, 0, VERSION_LEN);
	if (uname(&buf))
		return;

#if defined(WITH_KERNEL_VERSION)
	snprintf((char *)version,
		VERSION_NAME_LEN, "%s %s", buf.release, buf.version);
#else
	snprintf((char *)version,
		VERSION_NAME_LEN, "%s", buf.release);
#endif
#else
	memset(version, 0, VERSION_LEN);
#endif
}

#if defined(__linux__) && defined(_IO) && !defined(BLKGETSIZE)
#define BLKGETSIZE _IO(0x12,96)
#endif

#if defined(__linux__) && defined(_IOR) && !defined(BLKGETSIZE64)
#define BLKGETSIZE64 _IOR(0x12,114, size_t)
#endif

#if defined(__linux__) && defined(_IO) && !defined(BLKSSZGET)
#define BLKSSZGET _IO(0x12,104)
#endif
#if defined(__APPLE__)
#include <sys/disk.h>
#define BLKGETSIZE DKIOCGETBLOCKCOUNT
#define BLKSSZGET DKIOCGETBLOCKSIZE
#endif /* APPLE_DARWIN */

#ifndef _WIN32
static int open_check_fs(char *path, int flag)
{
	if (c.func != DUMP && (c.func != FSCK || c.fix_on || c.auto_fix))
		return -1;

	/* allow to open ro */
	return open(path, O_RDONLY | flag);
}

#ifdef __linux__
static int is_power_of_2(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}
#endif

int get_device_info(int i)
{
	int32_t fd = 0;
	uint32_t sector_size;
#ifndef BLKGETSIZE64
	uint32_t total_sectors;
#endif
	struct stat *stat_buf;
#ifdef HDIO_GETGEO
	struct hd_geometry geom;
#endif
#if !defined(WITH_ANDROID) && defined(__linux__)
	sg_io_hdr_t io_hdr;
	unsigned char reply_buffer[96] = {0};
	unsigned char model_inq[6] = {MODELINQUIRY};
#endif
	struct device_info *dev = c.devices + i;

	if (c.sparse_mode) {
		fd = open(dev->path, O_RDWR | O_CREAT | O_BINARY, 0644);
		if (fd < 0) {
			fd = open_check_fs(dev->path, O_BINARY);
			if (fd < 0) {
				MSG(0, "\tError: Failed to open a sparse file!\n");
				return -1;
			}
		}
	}

	stat_buf = malloc(sizeof(struct stat));
	ASSERT(stat_buf);

	if (!c.sparse_mode) {
		if (stat(dev->path, stat_buf) < 0) {
			MSG(0, "\tError: Failed to get the device stat!\n");
			free(stat_buf);
			return -1;
		}

		if (S_ISBLK(stat_buf->st_mode) &&
				!c.force && c.func != DUMP && !c.dry_run) {
			fd = open(dev->path, O_RDWR | O_EXCL);
			if (fd < 0)
				fd = open_check_fs(dev->path, O_EXCL);
		} else {
			fd = open(dev->path, O_RDWR);
			if (fd < 0)
				fd = open_check_fs(dev->path, 0);
		}
	}
	if (fd < 0) {
		MSG(0, "\tError: Failed to open the device!\n");
		free(stat_buf);
		return -1;
	}

	dev->fd = fd;

	if (c.sparse_mode) {
		if (f2fs_init_sparse_file()) {
			free(stat_buf);
			return -1;
		}
	}

	if (c.kd == -1) {
#if !defined(WITH_ANDROID) && defined(__linux__)
		c.kd = open("/proc/version", O_RDONLY);
#endif
		if (c.kd < 0) {
			MSG(0, "Info: not exist /proc/version!\n");
			c.kd = -2;
		}
	}

	if (c.sparse_mode) {
		dev->total_sectors = c.device_size / dev->sector_size;
	} else if (S_ISREG(stat_buf->st_mode)) {
		dev->total_sectors = stat_buf->st_size / dev->sector_size;
	} else if (S_ISBLK(stat_buf->st_mode)) {
#ifdef BLKSSZGET
		if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
			MSG(0, "\tError: Using the default sector size\n");
		else if (dev->sector_size < sector_size)
			dev->sector_size = sector_size;
#endif
#ifdef BLKGETSIZE64
		if (ioctl(fd, BLKGETSIZE64, &dev->total_sectors) < 0) {
			MSG(0, "\tError: Cannot get the device size\n");
			free(stat_buf);
			return -1;
		}
#else
		if (ioctl(fd, BLKGETSIZE, &total_sectors) < 0) {
			MSG(0, "\tError: Cannot get the device size\n");
			free(stat_buf);
			return -1;
		}
		dev->total_sectors = total_sectors;
#endif
		dev->total_sectors /= dev->sector_size;

		if (i == 0) {
#ifdef HDIO_GETGEO
			if (ioctl(fd, HDIO_GETGEO, &geom) < 0)
				c.start_sector = 0;
			else
				c.start_sector = geom.start;
#else
			c.start_sector = 0;
#endif
		}

#if !defined(WITH_ANDROID) && defined(__linux__)
		/* Send INQUIRY command */
		memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
		io_hdr.interface_id = 'S';
		io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		io_hdr.dxfer_len = sizeof(reply_buffer);
		io_hdr.dxferp = reply_buffer;
		io_hdr.cmd_len = sizeof(model_inq);
		io_hdr.cmdp = model_inq;
		io_hdr.timeout = 1000;

		if (!ioctl(fd, SG_IO, &io_hdr)) {
			MSG(0, "Info: [%s] Disk Model: %.16s\n",
					dev->path, reply_buffer+16);
		}
#endif
	} else {
		MSG(0, "\tError: Volume type is not supported!!!\n");
		free(stat_buf);
		return -1;
	}

	if (!c.sector_size) {
		c.sector_size = dev->sector_size;
		c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
	} else if (c.sector_size != c.devices[i].sector_size) {
		MSG(0, "\tError: Different sector sizes!!!\n");
		free(stat_buf);
		return -1;
	}

#ifdef __linux__
	if (S_ISBLK(stat_buf->st_mode)) {
		if (f2fs_get_zoned_model(i) < 0) {
			free(stat_buf);
			return -1;
		}
	}

	if (dev->zoned_model != F2FS_ZONED_NONE) {

		/* Get the number of blocks per zones */
		if (f2fs_get_zone_blocks(i)) {
			MSG(0, "\tError: Failed to get number of blocks per zone\n");
			free(stat_buf);
			return -1;
		}

		if (!is_power_of_2(dev->zone_size))
			MSG(0, "Info: zoned: zone size %" PRIu64 " (not a power of 2)\n",
					dev->zone_size);

		/*
		 * Check zone configuration: for the first disk of a
		 * multi-device volume, conventional zones are needed.
		 */
		if (f2fs_check_zones(i)) {
			MSG(0, "\tError: Failed to check zone configuration\n");
			free(stat_buf);
			return -1;
		}
		MSG(0, "Info: Host-%s zoned block device:\n",
				(dev->zoned_model == F2FS_ZONED_HA) ?
					"aware" : "managed");
		MSG(0, " %u zones, %" PRIu64 " zone size(bytes), %u randomly writeable zones\n",
				dev->nr_zones, dev->zone_size,
				dev->nr_rnd_zones);
		MSG(0, " %zu blocks per zone\n",
				dev->zone_blocks);
	}
#endif
	/* adjust wanted_total_sectors */
	if (c.wanted_total_sectors != -1) {
		MSG(0, "Info: wanted sectors = %"PRIu64" (in %"PRIu64" bytes)\n",
				c.wanted_total_sectors, c.wanted_sector_size);
		if (c.wanted_sector_size == -1) {
			c.wanted_sector_size = dev->sector_size;
		} else if (dev->sector_size != c.wanted_sector_size) {
			c.wanted_total_sectors *= c.wanted_sector_size;
			c.wanted_total_sectors /= dev->sector_size;
		}
	}

	c.total_sectors += dev->total_sectors;
	free(stat_buf);
	return 0;
}

#else

#include "windows.h"
#include "winioctl.h"

#if (_WIN32_WINNT >= 0x0500)
#define HAVE_GET_FILE_SIZE_EX 1
#endif

static int win_get_device_size(const char *file, uint64_t *device_size)
{
	HANDLE dev;
	PARTITION_INFORMATION pi;
	DISK_GEOMETRY gi;
	DWORD retbytes;
#ifdef HAVE_GET_FILE_SIZE_EX
	LARGE_INTEGER filesize;
#else
	DWORD filesize;
#endif /* HAVE_GET_FILE_SIZE_EX */

	dev = CreateFile(file, GENERIC_READ,
			FILE_SHARE_READ | FILE_SHARE_WRITE,
			NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);

	if (dev == INVALID_HANDLE_VALUE)
		return EBADF;
	if (DeviceIoControl(dev, IOCTL_DISK_GET_PARTITION_INFO,
			&pi, sizeof(PARTITION_INFORMATION),
			&pi, sizeof(PARTITION_INFORMATION),
			&retbytes, NULL)) {

		*device_size = pi.PartitionLength.QuadPart;

	} else if (DeviceIoControl(dev, IOCTL_DISK_GET_DRIVE_GEOMETRY,
			&gi, sizeof(DISK_GEOMETRY),
			&gi, sizeof(DISK_GEOMETRY),
			&retbytes, NULL)) {

		*device_size = gi.BytesPerSector *
			gi.SectorsPerTrack *
			gi.TracksPerCylinder *
			gi.Cylinders.QuadPart;

#ifdef HAVE_GET_FILE_SIZE_EX
	} else if (GetFileSizeEx(dev, &filesize)) {
		*device_size = filesize.QuadPart;
	}
#else
	} else {
		filesize = GetFileSize(dev, NULL);
		if (filesize == INVALID_FILE_SIZE)
			return -1;
		*device_size = filesize;
	}
#endif /* HAVE_GET_FILE_SIZE_EX */

	CloseHandle(dev);
	return 0;
}

int get_device_info(int i)
{
	struct device_info *dev = c.devices + i;
	uint64_t device_size = 0;
	int32_t fd = 0;

	/* Block device target is not supported on Windows. */
	if (!c.sparse_mode) {
		if (win_get_device_size(dev->path, &device_size)) {
			MSG(0, "\tError: Failed to get device size!\n");
			return -1;
		}
	} else {
		device_size = c.device_size;
	}
	if (c.sparse_mode) {
		fd = open((char *)dev->path, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
	} else {
		fd = open((char *)dev->path, O_RDWR | O_BINARY);
	}
	if (fd < 0) {
		MSG(0, "\tError: Failed to open the device!\n");
		return -1;
	}
	dev->fd = fd;
	dev->total_sectors = device_size / dev->sector_size;
	c.start_sector = 0;
	c.sector_size = dev->sector_size;
	c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
	c.total_sectors += dev->total_sectors;

	if (c.sparse_mode && f2fs_init_sparse_file())
		return -1;
	return 0;
}
#endif

int f2fs_get_device_info(void)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (get_device_info(i))
			return -1;
	return 0;
}

int f2fs_get_f2fs_info(void)
{
	int i;

	if (c.wanted_total_sectors < c.total_sectors) {
		MSG(0, "Info: total device sectors = %"PRIu64" (in %u bytes)\n",
				c.total_sectors, c.sector_size);
		c.total_sectors = c.wanted_total_sectors;
		c.devices[0].total_sectors = c.total_sectors;
	}
	if (c.total_sectors * c.sector_size >
		(uint64_t)F2FS_MAX_SEGMENT * 2 * 1024 * 1024) {
		MSG(0, "\tError: F2FS can support 16TB at most!!!\n");
		return -1;
	}

	/*
	 * Check device types and determine the final volume operation mode:
	 *   - If all devices are regular block devices, default operation.
	 *   - If at least one HM device is found, operate in HM mode (BLKZONED
	 *     feature will be enabled by mkfs).
	 *   - If an HA device is found, let mkfs decide based on the -m option
	 *     setting by the user.
	 */
	c.zoned_model = F2FS_ZONED_NONE;
	for (i = 0; i < c.ndevs; i++) {
		switch (c.devices[i].zoned_model) {
		case F2FS_ZONED_NONE:
			continue;
		case F2FS_ZONED_HM:
			c.zoned_model = F2FS_ZONED_HM;
			break;
		case F2FS_ZONED_HA:
			if (c.zoned_model != F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HA;
			break;
		}
	}

	if (c.zoned_model != F2FS_ZONED_NONE) {

		/*
		 * For the zoned model, the zone sizes of all zoned devices
		 * must be equal.
		 */
		for (i = 0; i < c.ndevs; i++) {
			if (c.devices[i].zoned_model == F2FS_ZONED_NONE)
				continue;
			if (c.zone_blocks &&
				c.zone_blocks != c.devices[i].zone_blocks) {
				MSG(0, "\tError: zones of different size are "
						"not supported\n");
				return -1;
			}
			c.zone_blocks = c.devices[i].zone_blocks;
		}

		/*
		 * Align sections to the device zone size and align F2FS zones
		 * to the device zones. For F2FS_ZONED_HA model without the
		 * BLKZONED feature set at format time, this is only an
		 * optimization as sequential writes will not be enforced.
		 */
		c.segs_per_sec = c.zone_blocks / DEFAULT_BLOCKS_PER_SEGMENT;
		c.secs_per_zone = 1;
	} else {
		if (c.zoned_mode != 0) {
			MSG(0, "\n Error: %s may not be a zoned block device \n",
					c.devices[0].path);
			return -1;
		}
	}

	c.segs_per_zone = c.segs_per_sec * c.secs_per_zone;

	if (c.func != MKFS)
		return 0;

	MSG(0, "Info: Segments per section = %d\n", c.segs_per_sec);
	MSG(0, "Info: Sections per zone = %d\n", c.secs_per_zone);
	MSG(0, "Info: sector size = %u\n", c.sector_size);
	MSG(0, "Info: total sectors = %"PRIu64" (%"PRIu64" MB)\n",
			c.total_sectors, (c.total_sectors *
				(c.sector_size >> 9)) >> 11);
	return 0;
}

unsigned int calc_extra_isize(void)
{
	unsigned int size = offsetof(struct f2fs_inode, i_projid);

	if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
		size = offsetof(struct f2fs_inode, i_projid);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
		size = offsetof(struct f2fs_inode, i_inode_checksum);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
		size = offsetof(struct f2fs_inode, i_crtime);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
		size = offsetof(struct f2fs_inode, i_compr_blocks);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
		size = offsetof(struct f2fs_inode, i_extra_end);

	return size - F2FS_EXTRA_ISIZE_OFFSET;
}
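
/*
 * calc_extra_isize() grows the extra attribute area just far enough to hold
 * the newest on-disk field implied by the enabled features: each check bumps
 * 'size' to the offset of the field that follows that feature's own fields.
 * For example, enabling PRJQUOTA extends the area through i_projid (up to
 * i_inode_checksum), and enabling COMPRESSION extends it all the way to
 * i_extra_end.
 */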

#define ARRAY_SIZE(array) \
	(sizeof(array) / sizeof(array[0]))

static const struct {
	char *name;
	__u16 encoding_magic;
	__u16 default_flags;

} f2fs_encoding_map[] = {
	{
		.encoding_magic = F2FS_ENC_UTF8_12_1,
		.name = "utf8",
		.default_flags = 0,
	},
};

static const struct enc_flags {
	__u16 flag;
	char *param;
} encoding_flags[] = {
	{ F2FS_ENC_STRICT_MODE_FL, "strict" },
};

/* Return a positive number < 0xff indicating the encoding magic number
 * or a negative value indicating error. */
int f2fs_str2encoding(const char *string)
{
	int i;

	for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
		if (!strcmp(string, f2fs_encoding_map[i].name))
			return f2fs_encoding_map[i].encoding_magic;

	return -EINVAL;
}

char *f2fs_encoding2str(const int encoding)
{
	int i;

	for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
		if (f2fs_encoding_map[i].encoding_magic == encoding)
			return f2fs_encoding_map[i].name;

	return NULL;
}

int f2fs_get_encoding_flags(int encoding)
{
	int i;

	for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
		if (f2fs_encoding_map[i].encoding_magic == encoding)
			return f2fs_encoding_map[i].default_flags;

	return 0;
}

int f2fs_str2encoding_flags(char **param, __u16 *flags)
{
	char *f = strtok(*param, ",");
	const struct enc_flags *fl;
	int i, neg = 0;

	while (f) {
		neg = 0;
		if (!strncmp("no", f, 2)) {
			neg = 1;
			f += 2;
		}

		for (i = 0; i < ARRAY_SIZE(encoding_flags); i++) {
			fl = &encoding_flags[i];
			if (!strcmp(fl->param, f)) {
				if (neg) {
					MSG(0, "Sub %s\n", fl->param);
					*flags &= ~fl->flag;
				} else {
					MSG(0, "Add %s\n", fl->param);
					*flags |= fl->flag;
				}

				goto next_flag;
			}
		}
		*param = f;
		return -EINVAL;
next_flag:
		f = strtok(NULL, ":");
	}
	return 0;
}
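
/*
 * Example (illustrative): parsing a user-supplied flag list such as "strict"
 * or "nostrict". The variables below are hypothetical.
 *
 *	__u16 flags = f2fs_get_encoding_flags(F2FS_ENC_UTF8_12_1);
 *	char opt[] = "strict";
 *	char *p = opt;
 *
 *	if (f2fs_str2encoding_flags(&p, &flags))
 *		... 'p' now points at the unrecognized token ...
 *
 * Note that the list is tokenized in place with strtok(), so it must live in
 * writable memory.
 */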