1 /**
2 * libf2fs.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * Dual licensed under the GPL or LGPL version 2 licenses.
8 */
9 #define _LARGEFILE64_SOURCE
10 #define _FILE_OFFSET_BITS 64
11
12 #include <f2fs_fs.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <errno.h>
17 #include <unistd.h>
18 #include <fcntl.h>
19 #include <libgen.h>
20 #ifdef HAVE_MNTENT_H
21 #include <mntent.h>
22 #endif
23 #include <time.h>
24 #include <sys/stat.h>
25 #ifdef HAVE_SYS_IOCTL_H
26 #include <sys/ioctl.h>
27 #endif
28 #ifdef HAVE_SYS_SYSMACROS_H
29 #include <sys/sysmacros.h>
30 #endif
31 #ifdef HAVE_SYS_UTSNAME_H
32 #include <sys/utsname.h>
33 #endif
34 #ifdef HAVE_SCSI_SG_H
35 #include <scsi/sg.h>
36 #endif
37 #ifdef HAVE_LINUX_HDREG_H
38 #include <linux/hdreg.h>
39 #endif
40 #ifdef HAVE_LINUX_LIMITS_H
41 #include <linux/limits.h>
42 #endif
43
44 /* SCSI command for standard inquiry */
45 #define MODELINQUIRY 0x12,0x00,0x00,0x00,0x4A,0x00
46
47 #ifndef _WIN32 /* O_BINARY is windows-specific flag */
48 #define O_BINARY 0
49 #else
50 /* On Windows, wchar_t is only 16 bits wide, too small for the code points handled here, so use int instead. */
51 #define wchar_t int
52 #endif
53
54 /*
55 * UTF conversion codes are copied from exfat tools.
56 */
57 static const char *utf8_to_wchar(const char *input, wchar_t *wc,
58 size_t insize)
59 {
60 if ((input[0] & 0x80) == 0 && insize >= 1) {
61 *wc = (wchar_t) input[0];
62 return input + 1;
63 }
64 if ((input[0] & 0xe0) == 0xc0 && insize >= 2) {
65 *wc = (((wchar_t) input[0] & 0x1f) << 6) |
66 ((wchar_t) input[1] & 0x3f);
67 return input + 2;
68 }
69 if ((input[0] & 0xf0) == 0xe0 && insize >= 3) {
70 *wc = (((wchar_t) input[0] & 0x0f) << 12) |
71 (((wchar_t) input[1] & 0x3f) << 6) |
72 ((wchar_t) input[2] & 0x3f);
73 return input + 3;
74 }
75 if ((input[0] & 0xf8) == 0xf0 && insize >= 4) {
76 *wc = (((wchar_t) input[0] & 0x07) << 18) |
77 (((wchar_t) input[1] & 0x3f) << 12) |
78 (((wchar_t) input[2] & 0x3f) << 6) |
79 ((wchar_t) input[3] & 0x3f);
80 return input + 4;
81 }
82 if ((input[0] & 0xfc) == 0xf8 && insize >= 5) {
83 *wc = (((wchar_t) input[0] & 0x03) << 24) |
84 (((wchar_t) input[1] & 0x3f) << 18) |
85 (((wchar_t) input[2] & 0x3f) << 12) |
86 (((wchar_t) input[3] & 0x3f) << 6) |
87 ((wchar_t) input[4] & 0x3f);
88 return input + 5;
89 }
90 if ((input[0] & 0xfe) == 0xfc && insize >= 6) {
91 *wc = (((wchar_t) input[0] & 0x01) << 30) |
92 (((wchar_t) input[1] & 0x3f) << 24) |
93 (((wchar_t) input[2] & 0x3f) << 18) |
94 (((wchar_t) input[3] & 0x3f) << 12) |
95 (((wchar_t) input[4] & 0x3f) << 6) |
96 ((wchar_t) input[5] & 0x3f);
97 return input + 6;
98 }
99 return NULL;
100 }
101
102 static uint16_t *wchar_to_utf16(uint16_t *output, wchar_t wc, size_t outsize)
103 {
104 if (wc <= 0xffff) {
105 if (outsize == 0)
106 return NULL;
107 output[0] = cpu_to_le16(wc);
108 return output + 1;
109 }
110 if (outsize < 2)
111 return NULL;
112 wc -= 0x10000;
113 output[0] = cpu_to_le16(0xd800 | ((wc >> 10) & 0x3ff));
114 output[1] = cpu_to_le16(0xdc00 | (wc & 0x3ff));
115 return output + 2;
116 }
117
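/*
 * Convert a NUL-terminated UTF-8 string into little-endian UTF-16, emitting
 * surrogate pairs for code points above 0xffff.  Returns 0 on success,
 * -EILSEQ for a malformed input sequence, or -ENAMETOOLONG when the output
 * buffer is exhausted.
 */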
118 int utf8_to_utf16(uint16_t *output, const char *input, size_t outsize,
119 size_t insize)
120 {
121 const char *inp = input;
122 uint16_t *outp = output;
123 wchar_t wc;
124
125 while ((size_t)(inp - input) < insize && *inp) {
126 inp = utf8_to_wchar(inp, &wc, insize - (inp - input));
127 if (inp == NULL) {
128 DBG(0, "illegal UTF-8 sequence\n");
129 return -EILSEQ;
130 }
131 outp = wchar_to_utf16(outp, wc, outsize - (outp - output));
132 if (outp == NULL) {
133 DBG(0, "name is too long\n");
134 return -ENAMETOOLONG;
135 }
136 }
137 *outp = cpu_to_le16(0);
138 return 0;
139 }
140
141 static const uint16_t *utf16_to_wchar(const uint16_t *input, wchar_t *wc,
142 size_t insize)
143 {
144 if ((le16_to_cpu(input[0]) & 0xfc00) == 0xd800) {
145 if (insize < 2 || (le16_to_cpu(input[1]) & 0xfc00) != 0xdc00)
146 return NULL;
147 *wc = ((wchar_t) (le16_to_cpu(input[0]) & 0x3ff) << 10);
148 *wc |= (le16_to_cpu(input[1]) & 0x3ff);
149 *wc += 0x10000;
150 return input + 2;
151 } else {
152 *wc = le16_to_cpu(*input);
153 return input + 1;
154 }
155 }
156
157 static char *wchar_to_utf8(char *output, wchar_t wc, size_t outsize)
158 {
159 if (wc <= 0x7f) {
160 if (outsize < 1)
161 return NULL;
162 *output++ = (char) wc;
163 } else if (wc <= 0x7ff) {
164 if (outsize < 2)
165 return NULL;
166 *output++ = 0xc0 | (wc >> 6);
167 *output++ = 0x80 | (wc & 0x3f);
168 } else if (wc <= 0xffff) {
169 if (outsize < 3)
170 return NULL;
171 *output++ = 0xe0 | (wc >> 12);
172 *output++ = 0x80 | ((wc >> 6) & 0x3f);
173 *output++ = 0x80 | (wc & 0x3f);
174 } else if (wc <= 0x1fffff) {
175 if (outsize < 4)
176 return NULL;
177 *output++ = 0xf0 | (wc >> 18);
178 *output++ = 0x80 | ((wc >> 12) & 0x3f);
179 *output++ = 0x80 | ((wc >> 6) & 0x3f);
180 *output++ = 0x80 | (wc & 0x3f);
181 } else if (wc <= 0x3ffffff) {
182 if (outsize < 5)
183 return NULL;
184 *output++ = 0xf8 | (wc >> 24);
185 *output++ = 0x80 | ((wc >> 18) & 0x3f);
186 *output++ = 0x80 | ((wc >> 12) & 0x3f);
187 *output++ = 0x80 | ((wc >> 6) & 0x3f);
188 *output++ = 0x80 | (wc & 0x3f);
189 } else if (wc <= 0x7fffffff) {
190 if (outsize < 6)
191 return NULL;
192 *output++ = 0xfc | (wc >> 30);
193 *output++ = 0x80 | ((wc >> 24) & 0x3f);
194 *output++ = 0x80 | ((wc >> 18) & 0x3f);
195 *output++ = 0x80 | ((wc >> 12) & 0x3f);
196 *output++ = 0x80 | ((wc >> 6) & 0x3f);
197 *output++ = 0x80 | (wc & 0x3f);
198 } else
199 return NULL;
200
201 return output;
202 }
203
204 int utf16_to_utf8(char *output, const uint16_t *input, size_t outsize,
205 size_t insize)
206 {
207 const uint16_t *inp = input;
208 char *outp = output;
209 wchar_t wc;
210
211 while ((size_t)(inp - input) < insize && le16_to_cpu(*inp)) {
212 inp = utf16_to_wchar(inp, &wc, insize - (inp - input));
213 if (inp == NULL) {
214 DBG(0, "illegal UTF-16 sequence\n");
215 return -EILSEQ;
216 }
217 outp = wchar_to_utf8(outp, wc, outsize - (outp - output));
218 if (outp == NULL) {
219 DBG(0, "name is too long\n");
220 return -ENAMETOOLONG;
221 }
222 }
223 *outp = '\0';
224 return 0;
225 }
226
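/* Return log2(num) when num is a power of two, or -1 otherwise. */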
227 int log_base_2(uint32_t num)
228 {
229 int ret = 0;
230 if (num <= 0 || (num & (num - 1)) != 0)
231 return -1;
232
233 while (num >>= 1)
234 ret++;
235 return ret;
236 }
237
238 /*
239 * f2fs bit operations
240 */
241 static const int bits_in_byte[256] = {
242 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
243 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
244 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
245 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
246 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
247 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
248 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
249 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
250 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
251 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
252 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
253 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
254 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
255 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
256 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
257 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
258 };
259
260 int get_bits_in_byte(unsigned char n)
261 {
262 return bits_in_byte[n];
263 }
264
265 int test_and_set_bit_le(u32 nr, u8 *addr)
266 {
267 int mask, retval;
268
269 addr += nr >> 3;
270 mask = 1 << ((nr & 0x07));
271 retval = mask & *addr;
272 *addr |= mask;
273 return retval;
274 }
275
276 int test_and_clear_bit_le(u32 nr, u8 *addr)
277 {
278 int mask, retval;
279
280 addr += nr >> 3;
281 mask = 1 << ((nr & 0x07));
282 retval = mask & *addr;
283 *addr &= ~mask;
284 return retval;
285 }
286
287 int test_bit_le(u32 nr, const u8 *addr)
288 {
289 return ((1 << (nr & 7)) & (addr[nr >> 3]));
290 }
291
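/*
 * Note: unlike the *_le helpers above, the f2fs_*_bit helpers below number
 * bits from the most significant bit of each byte, i.e. mask = 1 << (7 - (nr & 7)).
 */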
292 int f2fs_test_bit(unsigned int nr, const char *p)
293 {
294 int mask;
295 char *addr = (char *)p;
296
297 addr += (nr >> 3);
298 mask = 1 << (7 - (nr & 0x07));
299 return (mask & *addr) != 0;
300 }
301
302 int f2fs_set_bit(unsigned int nr, char *addr)
303 {
304 int mask;
305 int ret;
306
307 addr += (nr >> 3);
308 mask = 1 << (7 - (nr & 0x07));
309 ret = mask & *addr;
310 *addr |= mask;
311 return ret;
312 }
313
314 int f2fs_clear_bit(unsigned int nr, char *addr)
315 {
316 int mask;
317 int ret;
318
319 addr += (nr >> 3);
320 mask = 1 << (7 - (nr & 0x07));
321 ret = mask & *addr;
322 *addr &= ~mask;
323 return ret;
324 }
325
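/* Return the index (0..7) of the least significant set bit of a non-zero byte. */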
326 static inline u64 __ffs(u8 word)
327 {
328 int num = 0;
329
330 if ((word & 0xf) == 0) {
331 num += 4;
332 word >>= 4;
333 }
334 if ((word & 0x3) == 0) {
335 num += 2;
336 word >>= 2;
337 }
338 if ((word & 0x1) == 0)
339 num += 1;
340 return num;
341 }
342
343 /* Copied from linux/lib/find_bit.c */
344 #define BITMAP_FIRST_BYTE_MASK(start) (0xff << ((start) & (BITS_PER_BYTE - 1)))
345
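/*
 * Scan the bitmap at @addr byte by byte for the next set bit at or after
 * @start.  Passing @invert = 0xff flips each byte first, turning this into a
 * search for the next zero bit.  Returns @nbits if no bit is found.
 */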
346 static u64 _find_next_bit_le(const u8 *addr, u64 nbits, u64 start, char invert)
347 {
348 u8 tmp;
349
350 if (!nbits || start >= nbits)
351 return nbits;
352
353 tmp = addr[start / BITS_PER_BYTE] ^ invert;
354
355 /* Handle 1st word. */
356 tmp &= BITMAP_FIRST_BYTE_MASK(start);
357 start = round_down(start, BITS_PER_BYTE);
358
359 while (!tmp) {
360 start += BITS_PER_BYTE;
361 if (start >= nbits)
362 return nbits;
363
364 tmp = addr[start / BITS_PER_BYTE] ^ invert;
365 }
366
367 return min(start + __ffs(tmp), nbits);
368 }
369
370 u64 find_next_bit_le(const u8 *addr, u64 size, u64 offset)
371 {
372 return _find_next_bit_le(addr, size, offset, 0);
373 }
374
375
376 u64 find_next_zero_bit_le(const u8 *addr, u64 size, u64 offset)
377 {
378 return _find_next_bit_le(addr, size, offset, 0xff);
379 }
380
381 /*
382 * Hashing code adapted from ext3
383 */
384 #define DELTA 0x9E3779B9
385
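/*
 * One 16-round TEA (Tiny Encryption Algorithm) pass, used purely as the
 * mixing step of the directory-entry hash below.
 */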
386 static void TEA_transform(unsigned int buf[4], unsigned int const in[])
387 {
388 __u32 sum = 0;
389 __u32 b0 = buf[0], b1 = buf[1];
390 __u32 a = in[0], b = in[1], c = in[2], d = in[3];
391 int n = 16;
392
393 do {
394 sum += DELTA;
395 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
396 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
397 } while (--n);
398
399 buf[0] += b0;
400 buf[1] += b1;
401
402 }
403
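/*
 * Pack up to @num 32-bit words of @msg into @buf for TEA_transform().
 * Remaining bytes and words are filled with a pad pattern derived from the
 * name length, so names of different lengths keep distinct hashes.
 */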
404 static void str2hashbuf(const unsigned char *msg, int len,
405 unsigned int *buf, int num)
406 {
407 unsigned pad, val;
408 int i;
409
410 pad = (__u32)len | ((__u32)len << 8);
411 pad |= pad << 16;
412
413 val = pad;
414 if (len > num * 4)
415 len = num * 4;
416 for (i = 0; i < len; i++) {
417 if ((i % 4) == 0)
418 val = pad;
419 val = msg[i] + (val << 8);
420 if ((i % 4) == 3) {
421 *buf++ = val;
422 val = pad;
423 num--;
424 }
425 }
426 if (--num >= 0)
427 *buf++ = val;
428 while (--num >= 0)
429 *buf++ = pad;
430
431 }
432
433 /**
434 * Return hash value of directory entry
435 * @param name dentry name
436 * @param len name length
437 * @return hash value on success, errno on failure
438 */
439 static f2fs_hash_t __f2fs_dentry_hash(const unsigned char *name, int len) /* Need update */
440 {
441 __u32 hash;
442 f2fs_hash_t f2fs_hash;
443 const unsigned char *p;
444 __u32 in[8], buf[4];
445
446 /* special hash codes for special dentries */
447 if ((len <= 2) && (name[0] == '.') &&
448 (name[1] == '.' || name[1] == '\0'))
449 return 0;
450
451 /* Initialize the default seed for the hash checksum functions */
452 buf[0] = 0x67452301;
453 buf[1] = 0xefcdab89;
454 buf[2] = 0x98badcfe;
455 buf[3] = 0x10325476;
456
457 p = name;
458 while (1) {
459 str2hashbuf(p, len, in, 4);
460 TEA_transform(buf, in);
461 p += 16;
462 if (len <= 16)
463 break;
464 len -= 16;
465 }
466 hash = buf[0];
467
468 f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
469 return f2fs_hash;
470 }
471
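/*
 * Hash a dentry name, casefolding it first when @casefolded is set.  If
 * casefolding fails, fall back to hashing the raw byte sequence (the
 * opaque_seq path).
 */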
472 f2fs_hash_t f2fs_dentry_hash(int encoding, int casefolded,
473 const unsigned char *name, int len)
474 {
475 const struct f2fs_nls_table *table = f2fs_load_nls_table(encoding);
476 int r, dlen;
477 unsigned char *buff;
478
479 if (len && casefolded) {
480 buff = malloc(sizeof(char) * PATH_MAX);
481 if (!buff)
482 return -ENOMEM;
483 dlen = table->ops->casefold(table, name, len, buff, PATH_MAX);
484 if (dlen < 0) {
485 free(buff);
486 goto opaque_seq;
487 }
488 r = __f2fs_dentry_hash(buff, dlen);
489
490 free(buff);
491 return r;
492 }
493 opaque_seq:
494 return __f2fs_dentry_hash(name, len);
495 }
496
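/*
 * Number of block addresses stored directly in the inode.  For compressed
 * regular files the count is rounded down to a whole number of clusters
 * (1 << i_log_cluster_size blocks).
 */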
497 unsigned int addrs_per_inode(struct f2fs_inode *i)
498 {
499 unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
500
501 if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
502 !(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
503 return addrs;
504 return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
505 }
506
507 unsigned int addrs_per_block(struct f2fs_inode *i)
508 {
509 if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
510 !(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
511 return DEF_ADDRS_PER_BLOCK;
512 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
513 }
514
515 unsigned int f2fs_max_file_offset(struct f2fs_inode *i)
516 {
517 if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
518 !(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
519 return le64_to_cpu(i->i_size);
520 return ALIGN_UP(le64_to_cpu(i->i_size), 1 << i->i_log_cluster_size);
521 }
522
523 /*
524 * CRC32
525 */
526 #define CRCPOLY_LE 0xedb88320
527
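/*
 * Bit-at-a-time little-endian CRC32 over @buf using CRCPOLY_LE, seeded with
 * the caller-supplied @crc; callers in this file seed it with
 * F2FS_SUPER_MAGIC or a running checksum value.
 */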
528 uint32_t f2fs_cal_crc32(uint32_t crc, void *buf, int len)
529 {
530 int i;
531 unsigned char *p = (unsigned char *)buf;
532 while (len--) {
533 crc ^= *p++;
534 for (i = 0; i < 8; i++)
535 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
536 }
537 return crc;
538 }
539
540 int f2fs_crc_valid(uint32_t blk_crc, void *buf, int len)
541 {
542 uint32_t cal_crc = 0;
543
544 cal_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, buf, len);
545
546 if (cal_crc != blk_crc) {
547 DBG(0,"CRC validation failed: cal_crc = %u, "
548 "blk_crc = %u buff_size = 0x%x\n",
549 cal_crc, blk_crc, len);
550 return -1;
551 }
552 return 0;
553 }
554
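/*
 * Inode checksum: the seed mixes in the inode number and generation, the
 * i_inode_checksum field itself is replaced by zeroes while hashing, and the
 * remainder of the block after that field is covered as well.
 */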
555 __u32 f2fs_inode_chksum(struct f2fs_node *node)
556 {
557 struct f2fs_inode *ri = &node->i;
558 __le32 ino = node->footer.ino;
559 __le32 gen = ri->i_generation;
560 __u32 chksum, chksum_seed;
561 __u32 dummy_cs = 0;
562 unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
563 unsigned int cs_size = sizeof(dummy_cs);
564
565 chksum = f2fs_cal_crc32(c.chksum_seed, (__u8 *)&ino,
566 sizeof(ino));
567 chksum_seed = f2fs_cal_crc32(chksum, (__u8 *)&gen, sizeof(gen));
568
569 chksum = f2fs_cal_crc32(chksum_seed, (__u8 *)ri, offset);
570 chksum = f2fs_cal_crc32(chksum, (__u8 *)&dummy_cs, cs_size);
571 offset += cs_size;
572 chksum = f2fs_cal_crc32(chksum, (__u8 *)ri + offset,
573 F2FS_BLKSIZE - offset);
574 return chksum;
575 }
576
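/*
 * Checkpoint checksum.  When checksum_offset is smaller than the legacy
 * CP_CHKSUM_OFFSET, the bytes following the checksum field are included too,
 * so the whole block except the field itself is covered.
 */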
577 __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *cp)
578 {
579 unsigned int chksum_ofs = le32_to_cpu(cp->checksum_offset);
580 __u32 chksum;
581
582 chksum = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, chksum_ofs);
583 if (chksum_ofs < CP_CHKSUM_OFFSET) {
584 chksum_ofs += sizeof(chksum);
585 chksum = f2fs_cal_crc32(chksum, (__u8 *)cp + chksum_ofs,
586 F2FS_BLKSIZE - chksum_ofs);
587 }
588 return chksum;
589 }
590
591 int write_inode(struct f2fs_node *inode, u64 blkaddr)
592 {
593 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
594 inode->i.i_inode_checksum =
595 cpu_to_le32(f2fs_inode_chksum(inode));
596 return dev_write_block(inode, blkaddr);
597 }
598
599 /*
600 * try to identify the root device
601 */
602 char *get_rootdev()
603 {
604 #if defined(_WIN32) || defined(WITH_ANDROID)
605 return NULL;
606 #else
607 struct stat sb;
608 int fd, ret;
609 char buf[PATH_MAX + 1];
610 char *uevent, *ptr;
611 char *rootdev;
612
613 if (stat("/", &sb) == -1)
614 return NULL;
615
616 snprintf(buf, PATH_MAX, "/sys/dev/block/%u:%u/uevent",
617 major(sb.st_dev), minor(sb.st_dev));
618
619 fd = open(buf, O_RDONLY);
620
621 if (fd < 0)
622 return NULL;
623
624 ret = lseek(fd, (off_t)0, SEEK_END);
625 (void)lseek(fd, (off_t)0, SEEK_SET);
626
627 if (ret == -1) {
628 close(fd);
629 return NULL;
630 }
631
632 uevent = malloc(ret + 1);
633 ASSERT(uevent);
634
635 uevent[ret] = '\0';
636
637 ret = read(fd, uevent, ret);
638 close(fd);
639
640 ptr = strstr(uevent, "DEVNAME");
641 if (!ptr)
642 goto out_free;
643
644 ret = sscanf(ptr, "DEVNAME=%s\n", buf);
645 if (strlen(buf) == 0)
646 goto out_free;
647
648 ret = strlen(buf) + 5;
649 rootdev = malloc(ret + 1);
650 if (!rootdev)
651 goto out_free;
652 rootdev[ret] = '\0';
653
654 snprintf(rootdev, ret + 1, "/dev/%s", buf);
655 free(uevent);
656 return rootdev;
657
658 out_free:
659 free(uevent);
660 return NULL;
661 #endif
662 }
663
664 /*
665 * device information
666 */
667 void f2fs_init_configuration(void)
668 {
669 int i;
670
671 memset(&c, 0, sizeof(struct f2fs_configuration));
672 c.ndevs = 1;
673 c.sectors_per_blk = DEFAULT_SECTORS_PER_BLOCK;
674 c.blks_per_seg = DEFAULT_BLOCKS_PER_SEGMENT;
675 c.wanted_total_sectors = -1;
676 c.wanted_sector_size = -1;
677 #ifndef WITH_ANDROID
678 c.preserve_limits = 1;
679 c.no_kernel_check = 1;
680 #else
681 c.no_kernel_check = 0;
682 #endif
683
684 for (i = 0; i < MAX_DEVICES; i++) {
685 c.devices[i].fd = -1;
686 c.devices[i].sector_size = DEFAULT_SECTOR_SIZE;
687 c.devices[i].end_blkaddr = -1;
688 c.devices[i].zoned_model = F2FS_ZONED_NONE;
689 }
690
691 /* calculated by overprovision ratio */
692 c.segs_per_sec = 1;
693 c.secs_per_zone = 1;
694 c.segs_per_zone = 1;
695 c.vol_label = "";
696 c.trim = 1;
697 c.kd = -1;
698 c.fixed_time = -1;
699 c.s_encoding = 0;
700 c.s_encoding_flags = 0;
701
702 /* default root owner */
703 c.root_uid = getuid();
704 c.root_gid = getgid();
705 }
706
707 int f2fs_dev_is_writable(void)
708 {
709 return !c.ro || c.force;
710 }
711
712 #ifdef HAVE_SETMNTENT
713 static int is_mounted(const char *mpt, const char *device)
714 {
715 FILE *file = NULL;
716 struct mntent *mnt = NULL;
717
718 file = setmntent(mpt, "r");
719 if (file == NULL)
720 return 0;
721
722 while ((mnt = getmntent(file)) != NULL) {
723 if (!strcmp(device, mnt->mnt_fsname)) {
724 #ifdef MNTOPT_RO
725 if (hasmntopt(mnt, MNTOPT_RO))
726 c.ro = 1;
727 #endif
728 break;
729 }
730 }
731 endmntent(file);
732 return mnt ? 1 : 0;
733 }
734 #endif
735
736 int f2fs_dev_is_umounted(char *path)
737 {
738 #ifdef _WIN32
739 return 0;
740 #else
741 struct stat *st_buf;
742 int is_rootdev = 0;
743 int ret = 0;
744 char *rootdev_name = get_rootdev();
745
746 if (rootdev_name) {
747 if (!strcmp(path, rootdev_name))
748 is_rootdev = 1;
749 free(rootdev_name);
750 }
751
752 /*
753 * try with /proc/mounts first to detect RDONLY.
754 * f2fs_stop_checkpoint makes RO in /proc/mounts while RW in /etc/mtab.
755 */
756 #ifdef __linux__
757 ret = is_mounted("/proc/mounts", path);
758 if (ret) {
759 MSG(0, "Info: Mounted device!\n");
760 return -1;
761 }
762 #endif
763 #if defined(MOUNTED) || defined(_PATH_MOUNTED)
764 #ifndef MOUNTED
765 #define MOUNTED _PATH_MOUNTED
766 #endif
767 ret = is_mounted(MOUNTED, path);
768 if (ret) {
769 MSG(0, "Info: Mounted device!\n");
770 return -1;
771 }
772 #endif
773 /*
774 * If we are supposed to operate on the root device, then
775 * also check the mounts for '/dev/root', which sometimes
776 * functions as an alias for the root device.
777 */
778 if (is_rootdev) {
779 #ifdef __linux__
780 ret = is_mounted("/proc/mounts", "/dev/root");
781 if (ret) {
782 MSG(0, "Info: Mounted device!\n");
783 return -1;
784 }
785 #endif
786 }
787
788 /*
789 * If f2fs is umounted with -l, the process can still use
790 * the file system. In this case, we should not format.
791 */
792 st_buf = malloc(sizeof(struct stat));
793 ASSERT(st_buf);
794
795 if (stat(path, st_buf) == 0 && S_ISBLK(st_buf->st_mode)) {
796 int fd = open(path, O_RDONLY | O_EXCL);
797
798 if (fd >= 0) {
799 close(fd);
800 } else if (errno == EBUSY) {
801 MSG(0, "\tError: In use by the system!\n");
802 free(st_buf);
803 return -1;
804 }
805 }
806 free(st_buf);
807 return ret;
808 #endif
809 }
810
811 int f2fs_devs_are_umounted(void)
812 {
813 int i;
814
815 for (i = 0; i < c.ndevs; i++)
816 if (f2fs_dev_is_umounted((char *)c.devices[i].path))
817 return -1;
818 return 0;
819 }
820
821 void get_kernel_version(__u8 *version)
822 {
823 int i;
824 for (i = 0; i < VERSION_NAME_LEN; i++) {
825 if (version[i] == '\n')
826 break;
827 }
828 memset(version + i, 0, VERSION_LEN + 1 - i);
829 }
830
831 void get_kernel_uname_version(__u8 *version)
832 {
833 #ifdef HAVE_SYS_UTSNAME_H
834 struct utsname buf;
835
836 memset(version, 0, VERSION_LEN);
837 if (uname(&buf))
838 return;
839
840 #if defined(WITH_KERNEL_VERSION)
841 snprintf((char *)version,
842 VERSION_NAME_LEN, "%s %s", buf.release, buf.version);
843 #else
844 snprintf((char *)version,
845 VERSION_NAME_LEN, "%s", buf.release);
846 #endif
847 #else
848 memset(version, 0, VERSION_LEN);
849 #endif
850 }
851
852 #if defined(__linux__) && defined(_IO) && !defined(BLKGETSIZE)
853 #define BLKGETSIZE _IO(0x12,96)
854 #endif
855
856 #if defined(__linux__) && defined(_IOR) && !defined(BLKGETSIZE64)
857 #define BLKGETSIZE64 _IOR(0x12,114, size_t)
858 #endif
859
860 #if defined(__linux__) && defined(_IO) && !defined(BLKSSZGET)
861 #define BLKSSZGET _IO(0x12,104)
862 #endif
863
864 #if defined(__APPLE__)
865 #include <sys/disk.h>
866 #define BLKGETSIZE DKIOCGETBLOCKCOUNT
867 #define BLKSSZGET DKIOCGETBLOCKSIZE
868 #endif /* APPLE_DARWIN */
869
870 #ifndef _WIN32
871 static int open_check_fs(char *path, int flag)
872 {
873 if (c.func != DUMP && (c.func != FSCK || c.fix_on || c.auto_fix))
874 return -1;
875
876 /* allow to open ro */
877 return open(path, O_RDONLY | flag);
878 }
879
880 #ifdef __linux__
881 static int is_power_of_2(unsigned long n)
882 {
883 return (n != 0 && ((n & (n - 1)) == 0));
884 }
885 #endif
886
887 int get_device_info(int i)
888 {
889 int32_t fd = 0;
890 uint32_t sector_size;
891 #ifndef BLKGETSIZE64
892 uint32_t total_sectors;
893 #endif
894 struct stat *stat_buf;
895 #ifdef HDIO_GETGEO
896 struct hd_geometry geom;
897 #endif
898 #if !defined(WITH_ANDROID) && defined(__linux__)
899 sg_io_hdr_t io_hdr;
900 unsigned char reply_buffer[96] = {0};
901 unsigned char model_inq[6] = {MODELINQUIRY};
902 #endif
903 struct device_info *dev = c.devices + i;
904
905 if (c.sparse_mode) {
906 fd = open(dev->path, O_RDWR | O_CREAT | O_BINARY, 0644);
907 if (fd < 0) {
908 fd = open_check_fs(dev->path, O_BINARY);
909 if (fd < 0) {
910 MSG(0, "\tError: Failed to open a sparse file!\n");
911 return -1;
912 }
913 }
914 }
915
916 stat_buf = malloc(sizeof(struct stat));
917 ASSERT(stat_buf);
918
919 if (!c.sparse_mode) {
920 if (stat(dev->path, stat_buf) < 0 ) {
921 MSG(0, "\tError: Failed to get the device stat!\n");
922 free(stat_buf);
923 return -1;
924 }
925
926 if (S_ISBLK(stat_buf->st_mode) &&
927 !c.force && c.func != DUMP && !c.dry_run) {
928 fd = open(dev->path, O_RDWR | O_EXCL);
929 if (fd < 0)
930 fd = open_check_fs(dev->path, O_EXCL);
931 } else {
932 fd = open(dev->path, O_RDWR);
933 if (fd < 0)
934 fd = open_check_fs(dev->path, 0);
935 }
936 }
937 if (fd < 0) {
938 MSG(0, "\tError: Failed to open the device!\n");
939 free(stat_buf);
940 return -1;
941 }
942
943 dev->fd = fd;
944
945 if (c.sparse_mode) {
946 if (f2fs_init_sparse_file()) {
947 free(stat_buf);
948 return -1;
949 }
950 }
951
952 if (c.kd == -1) {
953 #if !defined(WITH_ANDROID) && defined(__linux__)
954 c.kd = open("/proc/version", O_RDONLY);
955 #endif
956 if (c.kd < 0) {
957 MSG(0, "Info: not exist /proc/version!\n");
958 c.kd = -2;
959 }
960 }
961
962 if (c.sparse_mode) {
963 dev->total_sectors = c.device_size / dev->sector_size;
964 } else if (S_ISREG(stat_buf->st_mode)) {
965 dev->total_sectors = stat_buf->st_size / dev->sector_size;
966 } else if (S_ISBLK(stat_buf->st_mode)) {
967 #ifdef BLKSSZGET
968 if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
969 MSG(0, "\tError: Using the default sector size\n");
970 else if (dev->sector_size < sector_size)
971 dev->sector_size = sector_size;
972 #endif
973 #ifdef BLKGETSIZE64
974 if (ioctl(fd, BLKGETSIZE64, &dev->total_sectors) < 0) {
975 MSG(0, "\tError: Cannot get the device size\n");
976 free(stat_buf);
977 return -1;
978 }
979 #else
980 if (ioctl(fd, BLKGETSIZE, &total_sectors) < 0) {
981 MSG(0, "\tError: Cannot get the device size\n");
982 free(stat_buf);
983 return -1;
984 }
985 dev->total_sectors = total_sectors;
986 #endif
987 dev->total_sectors /= dev->sector_size;
988
989 if (i == 0) {
990 #ifdef HDIO_GETGEO
991 if (ioctl(fd, HDIO_GETGEO, &geom) < 0)
992 c.start_sector = 0;
993 else
994 c.start_sector = geom.start;
995 #else
996 c.start_sector = 0;
997 #endif
998 }
999
1000 #if !defined(WITH_ANDROID) && defined(__linux__)
1001 /* Send INQUIRY command */
1002 memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
1003 io_hdr.interface_id = 'S';
1004 io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
1005 io_hdr.dxfer_len = sizeof(reply_buffer);
1006 io_hdr.dxferp = reply_buffer;
1007 io_hdr.cmd_len = sizeof(model_inq);
1008 io_hdr.cmdp = model_inq;
1009 io_hdr.timeout = 1000;
1010
1011 if (!ioctl(fd, SG_IO, &io_hdr)) {
1012 MSG(0, "Info: [%s] Disk Model: %.16s\n",
1013 dev->path, reply_buffer+16);
1014 }
1015 #endif
1016 } else {
1017 MSG(0, "\tError: Volume type is not supported!!!\n");
1018 free(stat_buf);
1019 return -1;
1020 }
1021
1022 if (!c.sector_size) {
1023 c.sector_size = dev->sector_size;
1024 c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
1025 } else if (c.sector_size != c.devices[i].sector_size) {
1026 MSG(0, "\tError: Different sector sizes!!!\n");
1027 free(stat_buf);
1028 return -1;
1029 }
1030
1031 #ifdef __linux__
1032 if (S_ISBLK(stat_buf->st_mode)) {
1033 if (f2fs_get_zoned_model(i) < 0) {
1034 free(stat_buf);
1035 return -1;
1036 }
1037 }
1038
1039 if (dev->zoned_model != F2FS_ZONED_NONE) {
1040
1041 /* Get the number of blocks per zones */
1042 if (f2fs_get_zone_blocks(i)) {
1043 MSG(0, "\tError: Failed to get number of blocks per zone\n");
1044 free(stat_buf);
1045 return -1;
1046 }
1047
1048 if (!is_power_of_2(dev->zone_size))
1049 MSG(0, "Info: zoned: zone size %" PRIu64 "u (not a power of 2)\n",
1050 dev->zone_size);
1051
1052 /*
1053 * Check zone configuration: for the first disk of a
1054 * multi-device volume, conventional zones are needed.
1055 */
1056 if (f2fs_check_zones(i)) {
1057 MSG(0, "\tError: Failed to check zone configuration\n");
1058 free(stat_buf);
1059 return -1;
1060 }
1061 MSG(0, "Info: Host-%s zoned block device:\n",
1062 (dev->zoned_model == F2FS_ZONED_HA) ?
1063 "aware" : "managed");
1064 MSG(0, " %u zones, %" PRIu64 "u zone size(bytes), %u randomly writeable zones\n",
1065 dev->nr_zones, dev->zone_size,
1066 dev->nr_rnd_zones);
1067 MSG(0, " %zu blocks per zone\n",
1068 dev->zone_blocks);
1069 }
1070 #endif
1071 /* adjust wanted_total_sectors */
1072 if (c.wanted_total_sectors != -1) {
1073 MSG(0, "Info: wanted sectors = %"PRIu64" (in %"PRIu64" bytes)\n",
1074 c.wanted_total_sectors, c.wanted_sector_size);
1075 if (c.wanted_sector_size == -1) {
1076 c.wanted_sector_size = dev->sector_size;
1077 } else if (dev->sector_size != c.wanted_sector_size) {
1078 c.wanted_total_sectors *= c.wanted_sector_size;
1079 c.wanted_total_sectors /= dev->sector_size;
1080 }
1081 }
1082
1083 c.total_sectors += dev->total_sectors;
1084 free(stat_buf);
1085 return 0;
1086 }
1087
1088 #else
1089
1090 #include "windows.h"
1091 #include "winioctl.h"
1092
1093 #if (_WIN32_WINNT >= 0x0500)
1094 #define HAVE_GET_FILE_SIZE_EX 1
1095 #endif
1096
1097 static int win_get_device_size(const char *file, uint64_t *device_size)
1098 {
1099 HANDLE dev;
1100 PARTITION_INFORMATION pi;
1101 DISK_GEOMETRY gi;
1102 DWORD retbytes;
1103 #ifdef HAVE_GET_FILE_SIZE_EX
1104 LARGE_INTEGER filesize;
1105 #else
1106 DWORD filesize;
1107 #endif /* HAVE_GET_FILE_SIZE_EX */
1108
1109 dev = CreateFile(file, GENERIC_READ,
1110 FILE_SHARE_READ | FILE_SHARE_WRITE ,
1111 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
1112
1113 if (dev == INVALID_HANDLE_VALUE)
1114 return EBADF;
1115 if (DeviceIoControl(dev, IOCTL_DISK_GET_PARTITION_INFO,
1116 &pi, sizeof(PARTITION_INFORMATION),
1117 &pi, sizeof(PARTITION_INFORMATION),
1118 &retbytes, NULL)) {
1119
1120 *device_size = pi.PartitionLength.QuadPart;
1121
1122 } else if (DeviceIoControl(dev, IOCTL_DISK_GET_DRIVE_GEOMETRY,
1123 &gi, sizeof(DISK_GEOMETRY),
1124 &gi, sizeof(DISK_GEOMETRY),
1125 &retbytes, NULL)) {
1126
1127 *device_size = gi.BytesPerSector *
1128 gi.SectorsPerTrack *
1129 gi.TracksPerCylinder *
1130 gi.Cylinders.QuadPart;
1131
1132 #ifdef HAVE_GET_FILE_SIZE_EX
1133 } else if (GetFileSizeEx(dev, &filesize)) {
1134 *device_size = filesize.QuadPart;
1135 }
1136 #else
1137 } else {
1138 filesize = GetFileSize(dev, NULL);
1139 if (INVALID_FILE_SIZE == filesize)
1140 return -1;
1141 *device_size = filesize;
1142 }
1143 #endif /* HAVE_GET_FILE_SIZE_EX */
1144
1145 CloseHandle(dev);
1146 return 0;
1147 }
1148
1149 int get_device_info(int i)
1150 {
1151 struct device_info *dev = c.devices + i;
1152 uint64_t device_size = 0;
1153 int32_t fd = 0;
1154
1155 /* Block device target is not supported on Windows. */
1156 if (!c.sparse_mode) {
1157 if (win_get_device_size(dev->path, &device_size)) {
1158 MSG(0, "\tError: Failed to get device size!\n");
1159 return -1;
1160 }
1161 } else {
1162 device_size = c.device_size;
1163 }
1164 if (c.sparse_mode) {
1165 fd = open((char *)dev->path, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
1166 } else {
1167 fd = open((char *)dev->path, O_RDWR | O_BINARY);
1168 }
1169 if (fd < 0) {
1170 MSG(0, "\tError: Failed to open the device!\n");
1171 return -1;
1172 }
1173 dev->fd = fd;
1174 dev->total_sectors = device_size / dev->sector_size;
1175 c.start_sector = 0;
1176 c.sector_size = dev->sector_size;
1177 c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
1178 c.total_sectors += dev->total_sectors;
1179
1180 if (c.sparse_mode && f2fs_init_sparse_file())
1181 return -1;
1182 return 0;
1183 }
1184 #endif
1185
1186 int f2fs_get_device_info(void)
1187 {
1188 int i;
1189
1190 for (i = 0; i < c.ndevs; i++)
1191 if (get_device_info(i))
1192 return -1;
1193 return 0;
1194 }
1195
1196 int f2fs_get_f2fs_info(void)
1197 {
1198 int i;
1199
1200 if (c.wanted_total_sectors < c.total_sectors) {
1201 MSG(0, "Info: total device sectors = %"PRIu64" (in %u bytes)\n",
1202 c.total_sectors, c.sector_size);
1203 c.total_sectors = c.wanted_total_sectors;
1204 c.devices[0].total_sectors = c.total_sectors;
1205 }
1206 if (c.total_sectors * c.sector_size >
1207 (uint64_t)F2FS_MAX_SEGMENT * 2 * 1024 * 1024) {
1208 MSG(0, "\tError: F2FS can support 16TB at most!!!\n");
1209 return -1;
1210 }
1211
1212 /*
1213 * Check device types and determine the final volume operation mode:
1214 * - If all devices are regular block devices, default operation.
1215 * - If at least one HM device is found, operate in HM mode (BLKZONED
1216 * feature will be enabled by mkfs).
1217 * - If an HA device is found, let mkfs decide based on the -m option
1218 * setting by the user.
1219 */
1220 c.zoned_model = F2FS_ZONED_NONE;
1221 for (i = 0; i < c.ndevs; i++) {
1222 switch (c.devices[i].zoned_model) {
1223 case F2FS_ZONED_NONE:
1224 continue;
1225 case F2FS_ZONED_HM:
1226 c.zoned_model = F2FS_ZONED_HM;
1227 break;
1228 case F2FS_ZONED_HA:
1229 if (c.zoned_model != F2FS_ZONED_HM)
1230 c.zoned_model = F2FS_ZONED_HA;
1231 break;
1232 }
1233 }
1234
1235 if (c.zoned_model != F2FS_ZONED_NONE) {
1236
1237 /*
1238 * For zoned model, the zones sizes of all zoned devices must
1239 * be equal.
1240 */
1241 for (i = 0; i < c.ndevs; i++) {
1242 if (c.devices[i].zoned_model == F2FS_ZONED_NONE)
1243 continue;
1244 if (c.zone_blocks &&
1245 c.zone_blocks != c.devices[i].zone_blocks) {
1246 MSG(0, "\tError: zones of different size are "
1247 "not supported\n");
1248 return -1;
1249 }
1250 c.zone_blocks = c.devices[i].zone_blocks;
1251 }
1252
1253 /*
1254 * Align sections to the device zone size and align F2FS zones
1255 * to the device zones. For F2FS_ZONED_HA model without the
1256 * BLKZONED feature set at format time, this is only an
1257 * optimization as sequential writes will not be enforced.
1258 */
1259 c.segs_per_sec = c.zone_blocks / DEFAULT_BLOCKS_PER_SEGMENT;
1260 c.secs_per_zone = 1;
1261 } else {
1262 if(c.zoned_mode != 0) {
1263 MSG(0, "\n Error: %s may not be a zoned block device \n",
1264 c.devices[0].path);
1265 return -1;
1266 }
1267 }
1268
1269 c.segs_per_zone = c.segs_per_sec * c.secs_per_zone;
1270
1271 if (c.func != MKFS)
1272 return 0;
1273
1274 MSG(0, "Info: Segments per section = %d\n", c.segs_per_sec);
1275 MSG(0, "Info: Sections per zone = %d\n", c.secs_per_zone);
1276 MSG(0, "Info: sector size = %u\n", c.sector_size);
1277 MSG(0, "Info: total sectors = %"PRIu64" (%"PRIu64" MB)\n",
1278 c.total_sectors, (c.total_sectors *
1279 (c.sector_size >> 9)) >> 11);
1280 return 0;
1281 }
1282
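/*
 * The extra inode size grows with each enabled feature: every check below
 * bumps "size" to the offset of the field right after the fields that the
 * feature needs, so the last enabled feature wins.
 */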
1283 unsigned int calc_extra_isize(void)
1284 {
1285 unsigned int size = offsetof(struct f2fs_inode, i_projid);
1286
1287 if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
1288 size = offsetof(struct f2fs_inode, i_projid);
1289 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1290 size = offsetof(struct f2fs_inode, i_inode_checksum);
1291 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
1292 size = offsetof(struct f2fs_inode, i_crtime);
1293 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
1294 size = offsetof(struct f2fs_inode, i_compr_blocks);
1295 if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
1296 size = offsetof(struct f2fs_inode, i_extra_end);
1297
1298 return size - F2FS_EXTRA_ISIZE_OFFSET;
1299 }
1300
1301 #define ARRAY_SIZE(array) \
1302 (sizeof(array) / sizeof(array[0]))
1303
1304 static const struct {
1305 char *name;
1306 __u16 encoding_magic;
1307 __u16 default_flags;
1308
1309 } f2fs_encoding_map[] = {
1310 {
1311 .encoding_magic = F2FS_ENC_UTF8_12_1,
1312 .name = "utf8",
1313 .default_flags = 0,
1314 },
1315 };
1316
1317 static const struct enc_flags {
1318 __u16 flag;
1319 char *param;
1320 } encoding_flags[] = {
1321 { F2FS_ENC_STRICT_MODE_FL, "strict" },
1322 };
1323
1324 /* Return a positive number < 0xff indicating the encoding magic number
1325 * or a negative value indicating error. */
1326 int f2fs_str2encoding(const char *string)
1327 {
1328 int i;
1329
1330 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1331 if (!strcmp(string, f2fs_encoding_map[i].name))
1332 return f2fs_encoding_map[i].encoding_magic;
1333
1334 return -EINVAL;
1335 }
1336
1337 char *f2fs_encoding2str(const int encoding)
1338 {
1339 int i;
1340
1341 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1342 if (f2fs_encoding_map[i].encoding_magic == encoding)
1343 return f2fs_encoding_map[i].name;
1344
1345 return NULL;
1346 }
1347
1348 int f2fs_get_encoding_flags(int encoding)
1349 {
1350 int i;
1351
1352 for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
1353 if (f2fs_encoding_map[i].encoding_magic == encoding)
1354 return f2fs_encoding_map[i].default_flags;
1355
1356 return 0;
1357 }
1358
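/*
 * Parse a list of encoding flag names (e.g. "strict"); a "no" prefix clears
 * the flag instead of setting it.  On an unknown flag, *param is pointed at
 * the offending token and -EINVAL is returned.
 */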
1359 int f2fs_str2encoding_flags(char **param, __u16 *flags)
1360 {
1361 char *f = strtok(*param, ",");
1362 const struct enc_flags *fl;
1363 int i, neg = 0;
1364
1365 while (f) {
1366 neg = 0;
1367 if (!strncmp("no", f, 2)) {
1368 neg = 1;
1369 f += 2;
1370 }
1371
1372 for (i = 0; i < ARRAY_SIZE(encoding_flags); i++) {
1373 fl = &encoding_flags[i];
1374 if (!strcmp(fl->param, f)) {
1375 if (neg) {
1376 MSG(0, "Sub %s\n", fl->param);
1377 *flags &= ~fl->flag;
1378 } else {
1379 MSG(0, "Add %s\n", fl->param);
1380 *flags |= fl->flag;
1381 }
1382
1383 goto next_flag;
1384 }
1385 }
1386 *param = f;
1387 return -EINVAL;
1388 next_flag:
1389 f = strtok(NULL, ":");
1390 }
1391 return 0;
1392 }
1393