/**
 * libf2fs.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <f2fs_fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <libgen.h>
#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif
#include <time.h>
#include <sys/stat.h>
#ifndef ANDROID_WINDOWS_HOST
#include <sys/mount.h>
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_SYSMACROS_H
#include <sys/sysmacros.h>
#endif
#ifdef HAVE_SYS_UTSNAME_H
#include <sys/utsname.h>
#endif
#ifndef WITH_ANDROID
#ifdef HAVE_SCSI_SG_H
#include <scsi/sg.h>
#endif
#endif
#ifdef HAVE_LINUX_HDREG_H
#include <linux/hdreg.h>
#endif
#ifdef HAVE_LINUX_LIMITS_H
#include <linux/limits.h>
#endif

#ifndef WITH_ANDROID
/* SCSI command for standard inquiry */
#define MODELINQUIRY 0x12,0x00,0x00,0x00,0x4A,0x00
#endif

#ifndef ANDROID_WINDOWS_HOST /* O_BINARY is windows-specific flag */
#define O_BINARY 0
#else
/* On Windows, wchar_t is only 16 bits wide, which is too narrow for the
 * code points handled below, so use a plain int instead. */
#define wchar_t int
#endif

/*
 * UTF conversion code is copied from exfat tools.
 */
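/*
 * Decode one UTF-8 sequence (1 to 6 bytes) from @input into @wc.
 * Returns a pointer just past the consumed bytes, or NULL when the
 * lead byte is invalid or the input is truncated.
 */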
static const char *utf8_to_wchar(const char *input, wchar_t *wc,
		size_t insize)
{
	if ((input[0] & 0x80) == 0 && insize >= 1) {
		*wc = (wchar_t) input[0];
		return input + 1;
	}
	if ((input[0] & 0xe0) == 0xc0 && insize >= 2) {
		*wc = (((wchar_t) input[0] & 0x1f) << 6) |
		       ((wchar_t) input[1] & 0x3f);
		return input + 2;
	}
	if ((input[0] & 0xf0) == 0xe0 && insize >= 3) {
		*wc = (((wchar_t) input[0] & 0x0f) << 12) |
		      (((wchar_t) input[1] & 0x3f) << 6) |
		       ((wchar_t) input[2] & 0x3f);
		return input + 3;
	}
	if ((input[0] & 0xf8) == 0xf0 && insize >= 4) {
		*wc = (((wchar_t) input[0] & 0x07) << 18) |
		      (((wchar_t) input[1] & 0x3f) << 12) |
		      (((wchar_t) input[2] & 0x3f) << 6) |
		       ((wchar_t) input[3] & 0x3f);
		return input + 4;
	}
	if ((input[0] & 0xfc) == 0xf8 && insize >= 5) {
		*wc = (((wchar_t) input[0] & 0x03) << 24) |
		      (((wchar_t) input[1] & 0x3f) << 18) |
		      (((wchar_t) input[2] & 0x3f) << 12) |
		      (((wchar_t) input[3] & 0x3f) << 6) |
		       ((wchar_t) input[4] & 0x3f);
		return input + 5;
	}
	if ((input[0] & 0xfe) == 0xfc && insize >= 6) {
		*wc = (((wchar_t) input[0] & 0x01) << 30) |
		      (((wchar_t) input[1] & 0x3f) << 24) |
		      (((wchar_t) input[2] & 0x3f) << 18) |
		      (((wchar_t) input[3] & 0x3f) << 12) |
		      (((wchar_t) input[4] & 0x3f) << 6) |
		       ((wchar_t) input[5] & 0x3f);
		return input + 6;
	}
	return NULL;
}

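/*
 * Encode @wc as little-endian UTF-16, emitting a surrogate pair for
 * code points above 0xffff. Returns NULL if @outsize is too small.
 */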
static u_int16_t *wchar_to_utf16(u_int16_t *output, wchar_t wc, size_t outsize)
{
	if (wc <= 0xffff) {
		if (outsize == 0)
			return NULL;
		output[0] = cpu_to_le16(wc);
		return output + 1;
	}
	if (outsize < 2)
		return NULL;
	wc -= 0x10000;
	output[0] = cpu_to_le16(0xd800 | ((wc >> 10) & 0x3ff));
	output[1] = cpu_to_le16(0xdc00 | (wc & 0x3ff));
	return output + 2;
}

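/*
 * Convert a NUL-terminated UTF-8 string into a NUL-terminated UTF-16LE
 * string. Returns 0 on success, -EILSEQ on a malformed input sequence,
 * or -ENAMETOOLONG when the output buffer is too small.
 */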
int utf8_to_utf16(u_int16_t *output, const char *input, size_t outsize,
		size_t insize)
{
	const char *inp = input;
	u_int16_t *outp = output;
	wchar_t wc;

	while ((size_t)(inp - input) < insize && *inp) {
		inp = utf8_to_wchar(inp, &wc, insize - (inp - input));
		if (inp == NULL) {
			DBG(0, "illegal UTF-8 sequence\n");
			return -EILSEQ;
		}
		outp = wchar_to_utf16(outp, wc, outsize - (outp - output));
		if (outp == NULL) {
			DBG(0, "name is too long\n");
			return -ENAMETOOLONG;
		}
	}
	*outp = cpu_to_le16(0);
	return 0;
}

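/*
 * Decode one UTF-16LE code unit (or surrogate pair) into @wc.
 * Returns NULL when a high surrogate is not followed by a low surrogate.
 */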
static const u_int16_t *utf16_to_wchar(const u_int16_t *input, wchar_t *wc,
		size_t insize)
{
	if ((le16_to_cpu(input[0]) & 0xfc00) == 0xd800) {
		if (insize < 2 || (le16_to_cpu(input[1]) & 0xfc00) != 0xdc00)
			return NULL;
		*wc = ((wchar_t) (le16_to_cpu(input[0]) & 0x3ff) << 10);
		*wc |= (le16_to_cpu(input[1]) & 0x3ff);
		*wc += 0x10000;
		return input + 2;
	} else {
		*wc = le16_to_cpu(*input);
		return input + 1;
	}
}

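/*
 * Encode @wc as UTF-8, using up to 6 bytes for the historical 31-bit
 * range. Returns a pointer past the emitted bytes, or NULL when the
 * output buffer is too small or @wc is out of range.
 */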
static char *wchar_to_utf8(char *output, wchar_t wc, size_t outsize)
{
	if (wc <= 0x7f) {
		if (outsize < 1)
			return NULL;
		*output++ = (char) wc;
	} else if (wc <= 0x7ff) {
		if (outsize < 2)
			return NULL;
		*output++ = 0xc0 | (wc >> 6);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0xffff) {
		if (outsize < 3)
			return NULL;
		*output++ = 0xe0 | (wc >> 12);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x1fffff) {
		if (outsize < 4)
			return NULL;
		*output++ = 0xf0 | (wc >> 18);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x3ffffff) {
		if (outsize < 5)
			return NULL;
		*output++ = 0xf8 | (wc >> 24);
		*output++ = 0x80 | ((wc >> 18) & 0x3f);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else if (wc <= 0x7fffffff) {
		if (outsize < 6)
			return NULL;
		*output++ = 0xfc | (wc >> 30);
		*output++ = 0x80 | ((wc >> 24) & 0x3f);
		*output++ = 0x80 | ((wc >> 18) & 0x3f);
		*output++ = 0x80 | ((wc >> 12) & 0x3f);
		*output++ = 0x80 | ((wc >> 6) & 0x3f);
		*output++ = 0x80 | (wc & 0x3f);
	} else
		return NULL;

	return output;
}

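/*
 * Convert a UTF-16LE string into a NUL-terminated UTF-8 string.
 * Returns 0 on success, -EILSEQ on an invalid surrogate sequence,
 * or -ENAMETOOLONG when the output buffer is too small.
 */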
int utf16_to_utf8(char *output, const u_int16_t *input, size_t outsize,
		size_t insize)
{
	const u_int16_t *inp = input;
	char *outp = output;
	wchar_t wc;

	while ((size_t)(inp - input) < insize && le16_to_cpu(*inp)) {
		inp = utf16_to_wchar(inp, &wc, insize - (inp - input));
		if (inp == NULL) {
			DBG(0, "illegal UTF-16 sequence\n");
			return -EILSEQ;
		}
		outp = wchar_to_utf8(outp, wc, outsize - (outp - output));
		if (outp == NULL) {
			DBG(0, "name is too long\n");
			return -ENAMETOOLONG;
		}
	}
	*outp = '\0';
	return 0;
}

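/* Return log2(num) when num is a power of two, -1 otherwise. */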
int log_base_2(u_int32_t num)
{
	int ret = 0;
	if (num <= 0 || (num & (num - 1)) != 0)
		return -1;

	while (num >>= 1)
		ret++;
	return ret;
}

/*
 * f2fs bit operations
 */
static const int bits_in_byte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};

int get_bits_in_byte(unsigned char n)
{
	return bits_in_byte[n];
}

int test_and_set_bit_le(u32 nr, u8 *addr)
{
	int mask, retval;

	addr += nr >> 3;
	mask = 1 << ((nr & 0x07));
	retval = mask & *addr;
	*addr |= mask;
	return retval;
}

int test_and_clear_bit_le(u32 nr, u8 *addr)
{
	int mask, retval;

	addr += nr >> 3;
	mask = 1 << ((nr & 0x07));
	retval = mask & *addr;
	*addr &= ~mask;
	return retval;
}

int test_bit_le(u32 nr, const u8 *addr)
{
	return ((1 << (nr & 7)) & (addr[nr >> 3]));
}

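/*
 * f2fs_test_bit/f2fs_set_bit/f2fs_clear_bit below operate on big-endian
 * bitmaps: bit 0 is the most significant bit of the first byte, unlike
 * the little-endian *_le helpers above.
 */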
int f2fs_test_bit(unsigned int nr, const char *p)
{
	int mask;
	char *addr = (char *)p;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return (mask & *addr) != 0;
}

int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

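/* Return the index of the least significant set bit in a non-zero byte. */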
static inline u64 __ffs(u8 word)
{
	int num = 0;

	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}

/* Copied from linux/lib/find_bit.c */
#define BITMAP_FIRST_BYTE_MASK(start) (0xff << ((start) & (BITS_PER_BYTE - 1)))

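/*
 * Scan the bitmap byte by byte starting at @start. @invert is 0 to
 * search for a set bit and 0xff to search for a clear bit; @nbits is
 * returned when no matching bit is found.
 */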
static u64 _find_next_bit_le(const u8 *addr, u64 nbits, u64 start, char invert)
{
	u8 tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_BYTE] ^ invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_BYTE_MASK(start);
	start = round_down(start, BITS_PER_BYTE);

	while (!tmp) {
		start += BITS_PER_BYTE;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_BYTE] ^ invert;
	}

	return min(start + __ffs(tmp), nbits);
}

u64 find_next_bit_le(const u8 *addr, u64 size, u64 offset)
{
	return _find_next_bit_le(addr, size, offset, 0);
}


u64 find_next_zero_bit_le(const u8 *addr, u64 size, u64 offset)
{
	return _find_next_bit_le(addr, size, offset, 0xff);
}

/*
 * Hashing code adapted from ext3
 */
#define DELTA 0x9E3779B9

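/*
 * Mix the two 32-bit hash words in @buf with the 128-bit block in @in
 * using 16 TEA-style rounds.
 */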
static void TEA_transform(unsigned int buf[4], unsigned int const in[])
{
	__u32 sum = 0;
	__u32 b0 = buf[0], b1 = buf[1];
	__u32 a = in[0], b = in[1], c = in[2], d = in[3];
	int n = 16;

	do {
		sum += DELTA;
		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
	} while (--n);

	buf[0] += b0;
	buf[1] += b1;

}

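/*
 * Pack up to @num * 4 name bytes from @msg into 32-bit words in @buf,
 * padding the remainder with a value derived from the name length.
 */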
static void str2hashbuf(const unsigned char *msg, int len,
				unsigned int *buf, int num)
{
	unsigned pad, val;
	int i;

	pad = (__u32)len | ((__u32)len << 8);
	pad |= pad << 16;

	val = pad;
	if (len > num * 4)
		len = num * 4;
	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = pad;
		val = msg[i] + (val << 8);
		if ((i % 4) == 3) {
			*buf++ = val;
			val = pad;
			num--;
		}
	}
	if (--num >= 0)
		*buf++ = val;
	while (--num >= 0)
		*buf++ = pad;

}

/**
 * Return hash value of directory entry
 * @param name dentry name
 * @param len name length
 * @return hash value on success, errno on failure
 */
static f2fs_hash_t __f2fs_dentry_hash(const unsigned char *name, int len)/* Need update */
{
	__u32 hash;
	f2fs_hash_t f2fs_hash;
	const unsigned char *p;
	__u32 in[8], buf[4];

	/* special hash codes for special dentries */
	if ((len <= 2) && (name[0] == '.') &&
		(name[1] == '.' || name[1] == '\0'))
		return 0;

	/* Initialize the default seed for the hash checksum functions */
	buf[0] = 0x67452301;
	buf[1] = 0xefcdab89;
	buf[2] = 0x98badcfe;
	buf[3] = 0x10325476;

	p = name;
	while (1) {
		str2hashbuf(p, len, in, 4);
		TEA_transform(buf, in);
		p += 16;
		if (len <= 16)
			break;
		len -= 16;
	}
	hash = buf[0];

	f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
	return f2fs_hash;
}

f2fs_hash_t f2fs_dentry_hash(int encoding, int casefolded,
				const unsigned char *name, int len)
{
	const struct f2fs_nls_table *table = f2fs_load_nls_table(encoding);
	int r, dlen;
	unsigned char *buff;

	if (len && casefolded) {
		buff = malloc(sizeof(char) * PATH_MAX);
		if (!buff)
			return -ENOMEM;
		dlen = table->ops->casefold(table, name, len, buff, PATH_MAX);
		if (dlen < 0) {
			free(buff);
			goto opaque_seq;
		}
		r = __f2fs_dentry_hash(buff, dlen);

		free(buff);
		return r;
	}
opaque_seq:
	return __f2fs_dentry_hash(name, len);
}

#define ALIGN_DOWN(addrs, size) (((addrs) / (size)) * (size))
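/*
 * Number of data block addresses stored in the inode itself. For
 * compressed regular files the count is rounded down to a whole number
 * of compression clusters.
 */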
unsigned int addrs_per_inode(struct f2fs_inode *i)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);

	if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
			!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
		return addrs;
	return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
}

unsigned int addrs_per_block(struct f2fs_inode *i)
{
	if (!LINUX_S_ISREG(le16_to_cpu(i->i_mode)) ||
			!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
}

/*
 * CRC32
 */
#define CRCPOLY_LE 0xedb88320

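/* Bit-at-a-time CRC32 (little-endian polynomial) over @len bytes of @buf. */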
u_int32_t f2fs_cal_crc32(u_int32_t crc, void *buf, int len)
{
	int i;
	unsigned char *p = (unsigned char *)buf;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

int f2fs_crc_valid(u_int32_t blk_crc, void *buf, int len)
{
	u_int32_t cal_crc = 0;

	cal_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, buf, len);

	if (cal_crc != blk_crc) {
		DBG(0,"CRC validation failed: cal_crc = %u, "
			"blk_crc = %u buff_size = 0x%x\n",
			cal_crc, blk_crc, len);
		return -1;
	}
	return 0;
}

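/*
 * Compute the inode checksum: seed the CRC with the inode number and
 * generation, then checksum the on-disk inode with the i_inode_checksum
 * field treated as zero.
 */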
__u32 f2fs_inode_chksum(struct f2fs_node *node)
{
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_cal_crc32(c.chksum_seed, (__u8 *)&ino,
					sizeof(ino));
	chksum_seed = f2fs_cal_crc32(chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_cal_crc32(chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_cal_crc32(chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_cal_crc32(chksum, (__u8 *)ri + offset,
					F2FS_BLKSIZE - offset);
	return chksum;
}

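/*
 * Compute the checkpoint checksum over the bytes preceding the checksum
 * field, and also over the bytes following it when the checksum is not
 * stored at the end of the block.
 */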
__u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *cp)
{
	unsigned int chksum_ofs = le32_to_cpu(cp->checksum_offset);
	__u32 chksum;

	chksum = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_cal_crc32(chksum, (__u8 *)cp + chksum_ofs,
					F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

int write_inode(struct f2fs_node *inode, u64 blkaddr)
{
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
		inode->i.i_inode_checksum =
			cpu_to_le32(f2fs_inode_chksum(inode));
	return dev_write_block(inode, blkaddr);
}

/*
 * try to identify the root device
 */
char *get_rootdev()
{
#if defined(ANDROID_WINDOWS_HOST) || defined(WITH_ANDROID)
	return NULL;
#else
	struct stat sb;
	int fd, ret;
	char buf[PATH_MAX + 1];
	char *uevent, *ptr;
	char *rootdev;

	if (stat("/", &sb) == -1)
		return NULL;

	snprintf(buf, PATH_MAX, "/sys/dev/block/%u:%u/uevent",
		major(sb.st_dev), minor(sb.st_dev));

	fd = open(buf, O_RDONLY);

	if (fd < 0)
		return NULL;

	ret = lseek(fd, (off_t)0, SEEK_END);
	(void)lseek(fd, (off_t)0, SEEK_SET);

	if (ret == -1) {
		close(fd);
		return NULL;
	}

	uevent = malloc(ret + 1);
	ASSERT(uevent);

	uevent[ret] = '\0';

	ret = read(fd, uevent, ret);
	close(fd);

	ptr = strstr(uevent, "DEVNAME");
	if (!ptr)
		return NULL;

	ret = sscanf(ptr, "DEVNAME=%s\n", buf);
	if (strlen(buf) == 0)
		return NULL;

	ret = strlen(buf) + 5;
	rootdev = malloc(ret + 1);
	if (!rootdev)
		return NULL;
	rootdev[ret] = '\0';

	snprintf(rootdev, ret + 1, "/dev/%s", buf);
	return rootdev;
#endif
}

/*
 * device information
 */
void f2fs_init_configuration(void)
{
	int i;

	memset(&c, 0, sizeof(struct f2fs_configuration));
	c.ndevs = 1;
	c.sectors_per_blk = DEFAULT_SECTORS_PER_BLOCK;
	c.blks_per_seg = DEFAULT_BLOCKS_PER_SEGMENT;
	c.wanted_total_sectors = -1;
	c.wanted_sector_size = -1;
#ifndef WITH_ANDROID
	c.preserve_limits = 1;
	c.no_kernel_check = 1;
#else
	c.no_kernel_check = 0;
#endif

	for (i = 0; i < MAX_DEVICES; i++) {
		c.devices[i].fd = -1;
		c.devices[i].sector_size = DEFAULT_SECTOR_SIZE;
		c.devices[i].end_blkaddr = -1;
		c.devices[i].zoned_model = F2FS_ZONED_NONE;
	}

	/* calculated by overprovision ratio */
	c.segs_per_sec = 1;
	c.secs_per_zone = 1;
	c.segs_per_zone = 1;
	c.vol_label = "";
	c.trim = 1;
	c.kd = -1;
	c.fixed_time = -1;
	c.s_encoding = 0;
	c.s_encoding_flags = 0;

	/* default root owner */
	c.root_uid = getuid();
	c.root_gid = getgid();
}

int f2fs_dev_is_writable(void)
{
	return !c.ro || c.force;
}

#ifdef HAVE_SETMNTENT
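/*
 * Return 1 if @device appears in the mtab-format file @mpt; c.ro is set
 * when the matching entry is mounted read-only.
 */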
static int is_mounted(const char *mpt, const char *device)
{
	FILE *file = NULL;
	struct mntent *mnt = NULL;

	file = setmntent(mpt, "r");
	if (file == NULL)
		return 0;

	while ((mnt = getmntent(file)) != NULL) {
		if (!strcmp(device, mnt->mnt_fsname)) {
#ifdef MNTOPT_RO
			if (hasmntopt(mnt, MNTOPT_RO))
				c.ro = 1;
#endif
			break;
		}
	}
	endmntent(file);
	return mnt ? 1 : 0;
}
#endif

int f2fs_dev_is_umounted(char *path)
{
#ifdef ANDROID_WINDOWS_HOST
	return 0;
#else
	struct stat *st_buf;
	int is_rootdev = 0;
	int ret = 0;
	char *rootdev_name = get_rootdev();

	if (rootdev_name) {
		if (!strcmp(path, rootdev_name))
			is_rootdev = 1;
		free(rootdev_name);
	}

	/*
	 * try with /proc/mounts first to detect RDONLY.
	 * f2fs_stop_checkpoint makes RO in /proc/mounts while RW in /etc/mtab.
	 */
#ifdef __linux__
	ret = is_mounted("/proc/mounts", path);
	if (ret) {
		MSG(0, "Info: Mounted device!\n");
		return -1;
	}
#endif
#if defined(MOUNTED) || defined(_PATH_MOUNTED)
#ifndef MOUNTED
#define MOUNTED _PATH_MOUNTED
#endif
	ret = is_mounted(MOUNTED, path);
	if (ret) {
		MSG(0, "Info: Mounted device!\n");
		return -1;
	}
#endif
	/*
	 * If we are supposed to operate on the root device, then
	 * also check the mounts for '/dev/root', which sometimes
	 * functions as an alias for the root device.
	 */
	if (is_rootdev) {
#ifdef __linux__
		ret = is_mounted("/proc/mounts", "/dev/root");
		if (ret) {
			MSG(0, "Info: Mounted device!\n");
			return -1;
		}
#endif
	}

	/*
	 * If f2fs is umounted with -l, the process can still use
	 * the file system. In this case, we should not format.
	 */
	st_buf = malloc(sizeof(struct stat));
	ASSERT(st_buf);

	if (stat(path, st_buf) == 0 && S_ISBLK(st_buf->st_mode)) {
		int fd = open(path, O_RDONLY | O_EXCL);

		if (fd >= 0) {
			close(fd);
		} else if (errno == EBUSY) {
			MSG(0, "\tError: In use by the system!\n");
			free(st_buf);
			return -1;
		}
	}
	free(st_buf);
	return ret;
#endif
}

int f2fs_devs_are_umounted(void)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (f2fs_dev_is_umounted((char *)c.devices[i].path))
			return -1;
	return 0;
}

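/* Truncate the version string at the first newline and zero the tail. */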
void get_kernel_version(__u8 *version)
{
	int i;
	for (i = 0; i < VERSION_LEN; i++) {
		if (version[i] == '\n')
			break;
	}
	memset(version + i, 0, VERSION_LEN + 1 - i);
}

void get_kernel_uname_version(__u8 *version)
{
#ifdef HAVE_SYS_UTSNAME_H
	struct utsname buf;

	memset(version, 0, VERSION_LEN);
	if (uname(&buf))
		return;

#if !defined(WITH_KERNEL_VERSION)
	snprintf((char *)version,
		VERSION_LEN, "%s %s", buf.release, buf.version);
#else
	snprintf((char *)version,
		VERSION_LEN, "%s", buf.release);
#endif
#else
	memset(version, 0, VERSION_LEN);
#endif
}

#if defined(__linux__) && defined(_IO) && !defined(BLKGETSIZE)
#define BLKGETSIZE _IO(0x12,96)
#endif

#if defined(__linux__) && defined(_IOR) && !defined(BLKGETSIZE64)
#define BLKGETSIZE64 _IOR(0x12,114, size_t)
#endif

#if defined(__linux__) && defined(_IO) && !defined(BLKSSZGET)
#define BLKSSZGET _IO(0x12,104)
#endif

#if defined(__APPLE__)
#include <sys/disk.h>
#define BLKGETSIZE DKIOCGETBLOCKCOUNT
#define BLKSSZGET DKIOCGETBLOCKSIZE
#endif /* APPLE_DARWIN */

#ifndef ANDROID_WINDOWS_HOST
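/*
 * Fall back to a read-only open, but only for dump, or for fsck runs
 * that will not modify the filesystem.
 */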
static int open_check_fs(char *path, int flag)
{
	if (c.func != DUMP && (c.func != FSCK || c.fix_on || c.auto_fix))
		return -1;

	/* allow to open ro */
	return open(path, O_RDONLY | flag);
}

int get_device_info(int i)
{
	int32_t fd = 0;
	uint32_t sector_size;
#ifndef BLKGETSIZE64
	uint32_t total_sectors;
#endif
	struct stat *stat_buf;
#ifdef HDIO_GETGEO
	struct hd_geometry geom;
#endif
#if !defined(WITH_ANDROID) && defined(__linux__)
	sg_io_hdr_t io_hdr;
	unsigned char reply_buffer[96] = {0};
	unsigned char model_inq[6] = {MODELINQUIRY};
#endif
	struct device_info *dev = c.devices + i;

	if (c.sparse_mode) {
		fd = open(dev->path, O_RDWR | O_CREAT | O_BINARY, 0644);
		if (fd < 0) {
			fd = open_check_fs(dev->path, O_BINARY);
			if (fd < 0) {
				MSG(0, "\tError: Failed to open a sparse file!\n");
				return -1;
			}
		}
	}

	stat_buf = malloc(sizeof(struct stat));
	ASSERT(stat_buf);

	if (!c.sparse_mode) {
		if (stat(dev->path, stat_buf) < 0 ) {
			MSG(0, "\tError: Failed to get the device stat!\n");
			free(stat_buf);
			return -1;
		}

		if (S_ISBLK(stat_buf->st_mode) &&
				!c.force && c.func != DUMP && !c.dry_run) {
			fd = open(dev->path, O_RDWR | O_EXCL);
			if (fd < 0)
				fd = open_check_fs(dev->path, O_EXCL);
		} else {
			fd = open(dev->path, O_RDWR);
			if (fd < 0)
				fd = open_check_fs(dev->path, 0);
		}
	}
	if (fd < 0) {
		MSG(0, "\tError: Failed to open the device!\n");
		free(stat_buf);
		return -1;
	}

	dev->fd = fd;

	if (c.sparse_mode) {
		if (f2fs_init_sparse_file()) {
			free(stat_buf);
			return -1;
		}
	}

	if (c.kd == -1) {
#if !defined(WITH_ANDROID) && defined(__linux__)
		c.kd = open("/proc/version", O_RDONLY);
#endif
		if (c.kd < 0) {
			MSG(0, "\tInfo: No support kernel version!\n");
			c.kd = -2;
		}
	}

	if (c.sparse_mode) {
		dev->total_sectors = c.device_size / dev->sector_size;
	} else if (S_ISREG(stat_buf->st_mode)) {
		dev->total_sectors = stat_buf->st_size / dev->sector_size;
	} else if (S_ISBLK(stat_buf->st_mode)) {
#ifdef BLKSSZGET
		if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
			MSG(0, "\tError: Using the default sector size\n");
		else if (dev->sector_size < sector_size)
			dev->sector_size = sector_size;
#endif
#ifdef BLKGETSIZE64
		if (ioctl(fd, BLKGETSIZE64, &dev->total_sectors) < 0) {
			MSG(0, "\tError: Cannot get the device size\n");
			free(stat_buf);
			return -1;
		}
#else
		if (ioctl(fd, BLKGETSIZE, &total_sectors) < 0) {
			MSG(0, "\tError: Cannot get the device size\n");
			free(stat_buf);
			return -1;
		}
		dev->total_sectors = total_sectors;
#endif
		dev->total_sectors /= dev->sector_size;

		if (i == 0) {
#ifdef HDIO_GETGEO
			if (ioctl(fd, HDIO_GETGEO, &geom) < 0)
				c.start_sector = 0;
			else
				c.start_sector = geom.start;
#else
			c.start_sector = 0;
#endif
		}

#if !defined(WITH_ANDROID) && defined(__linux__)
		/* Send INQUIRY command */
		memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
		io_hdr.interface_id = 'S';
		io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		io_hdr.dxfer_len = sizeof(reply_buffer);
		io_hdr.dxferp = reply_buffer;
		io_hdr.cmd_len = sizeof(model_inq);
		io_hdr.cmdp = model_inq;
		io_hdr.timeout = 1000;

		if (!ioctl(fd, SG_IO, &io_hdr)) {
			MSG(0, "Info: [%s] Disk Model: %.16s\n",
					dev->path, reply_buffer+16);
		}
#endif
	} else {
		MSG(0, "\tError: Volume type is not supported!!!\n");
		free(stat_buf);
		return -1;
	}

	if (!c.sector_size) {
		c.sector_size = dev->sector_size;
		c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
	} else if (c.sector_size != c.devices[i].sector_size) {
		MSG(0, "\tError: Different sector sizes!!!\n");
		free(stat_buf);
		return -1;
	}

#if !defined(WITH_ANDROID) && defined(__linux__)
	if (S_ISBLK(stat_buf->st_mode)) {
		if (f2fs_get_zoned_model(i) < 0) {
			free(stat_buf);
			return -1;
		}
	}

	if (dev->zoned_model != F2FS_ZONED_NONE) {

		/* Get the number of blocks per zones */
		if (f2fs_get_zone_blocks(i)) {
			MSG(0, "\tError: Failed to get number of blocks per zone\n");
			free(stat_buf);
			return -1;
		}

		/*
		 * Check zone configuration: for the first disk of a
		 * multi-device volume, conventional zones are needed.
		 */
		if (f2fs_check_zones(i)) {
			MSG(0, "\tError: Failed to check zone configuration\n");
			free(stat_buf);
			return -1;
		}
		MSG(0, "Info: Host-%s zoned block device:\n",
				(dev->zoned_model == F2FS_ZONED_HA) ?
					"aware" : "managed");
		MSG(0, "      %u zones, %u randomly writeable zones\n",
				dev->nr_zones, dev->nr_rnd_zones);
		MSG(0, "      %lu blocks per zone\n",
				dev->zone_blocks);
	}
#endif
	/* adjust wanted_total_sectors */
	if (c.wanted_total_sectors != -1) {
		MSG(0, "Info: wanted sectors = %"PRIu64" (in %"PRIu64" bytes)\n",
				c.wanted_total_sectors, c.wanted_sector_size);
		if (c.wanted_sector_size == -1) {
			c.wanted_sector_size = dev->sector_size;
		} else if (dev->sector_size != c.wanted_sector_size) {
			c.wanted_total_sectors *= c.wanted_sector_size;
			c.wanted_total_sectors /= dev->sector_size;
		}
	}

	c.total_sectors += dev->total_sectors;
	free(stat_buf);
	return 0;
}

#else

#include "windows.h"
#include "winioctl.h"

#if (_WIN32_WINNT >= 0x0500)
#define HAVE_GET_FILE_SIZE_EX 1
#endif

static int win_get_device_size(const char *file, uint64_t *device_size)
{
	HANDLE dev;
	PARTITION_INFORMATION pi;
	DISK_GEOMETRY gi;
	DWORD retbytes;
#ifdef HAVE_GET_FILE_SIZE_EX
	LARGE_INTEGER filesize;
#else
	DWORD filesize;
#endif /* HAVE_GET_FILE_SIZE_EX */

	dev = CreateFile(file, GENERIC_READ,
			FILE_SHARE_READ | FILE_SHARE_WRITE ,
			NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);

	if (dev == INVALID_HANDLE_VALUE)
		return EBADF;
	if (DeviceIoControl(dev, IOCTL_DISK_GET_PARTITION_INFO,
				&pi, sizeof(PARTITION_INFORMATION),
				&pi, sizeof(PARTITION_INFORMATION),
				&retbytes, NULL)) {

		*device_size = pi.PartitionLength.QuadPart;

	} else if (DeviceIoControl(dev, IOCTL_DISK_GET_DRIVE_GEOMETRY,
				&gi, sizeof(DISK_GEOMETRY),
				&gi, sizeof(DISK_GEOMETRY),
				&retbytes, NULL)) {

		*device_size = gi.BytesPerSector *
			gi.SectorsPerTrack *
			gi.TracksPerCylinder *
			gi.Cylinders.QuadPart;

#ifdef HAVE_GET_FILE_SIZE_EX
	} else if (GetFileSizeEx(dev, &filesize)) {
		*device_size = filesize.QuadPart;
	}
#else
	} else {
		filesize = GetFileSize(dev, NULL);
		if (INVALID_FILE_SIZE == filesize)
			return -1;
		*device_size = filesize;
	}
#endif /* HAVE_GET_FILE_SIZE_EX */

	CloseHandle(dev);
	return 0;
}

int get_device_info(int i)
{
	struct device_info *dev = c.devices + i;
	uint64_t device_size = 0;
	int32_t fd = 0;

	/* Block device target is not supported on Windows. */
	if (!c.sparse_mode) {
		if (win_get_device_size(dev->path, &device_size)) {
			MSG(0, "\tError: Failed to get device size!\n");
			return -1;
		}
	} else {
		device_size = c.device_size;
	}
	if (c.sparse_mode) {
		fd = open((char *)dev->path, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
	} else {
		fd = open((char *)dev->path, O_RDWR | O_BINARY);
	}
	if (fd < 0) {
		MSG(0, "\tError: Failed to open the device!\n");
		return -1;
	}
	dev->fd = fd;
	dev->total_sectors = device_size / dev->sector_size;
	c.start_sector = 0;
	c.sector_size = dev->sector_size;
	c.sectors_per_blk = F2FS_BLKSIZE / c.sector_size;
	c.total_sectors += dev->total_sectors;

	return 0;
}
#endif

int f2fs_get_device_info(void)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (get_device_info(i))
			return -1;

	if (c.wanted_total_sectors < c.total_sectors) {
		MSG(0, "Info: total device sectors = %"PRIu64" (in %u bytes)\n",
				c.total_sectors, c.sector_size);
		c.total_sectors = c.wanted_total_sectors;
		c.devices[0].total_sectors = c.total_sectors;
	}
	if (c.total_sectors * c.sector_size >
		(u_int64_t)F2FS_MAX_SEGMENT * 2 * 1024 * 1024) {
		MSG(0, "\tError: F2FS can support 16TB at most!!!\n");
		return -1;
	}

	/*
	 * Check device types and determine the final volume operation mode:
	 *   - If all devices are regular block devices, default operation.
	 *   - If at least one HM device is found, operate in HM mode (BLKZONED
	 *     feature will be enabled by mkfs).
	 *   - If an HA device is found, let mkfs decide based on the -m option
	 *     setting by the user.
	 */
	c.zoned_model = F2FS_ZONED_NONE;
	for (i = 0; i < c.ndevs; i++) {
		switch (c.devices[i].zoned_model) {
		case F2FS_ZONED_NONE:
			continue;
		case F2FS_ZONED_HM:
			c.zoned_model = F2FS_ZONED_HM;
			break;
		case F2FS_ZONED_HA:
			if (c.zoned_model != F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HA;
			break;
		}
	}

	if (c.zoned_model != F2FS_ZONED_NONE) {

		/*
		 * For zoned model, the zones sizes of all zoned devices must
		 * be equal.
		 */
		for (i = 0; i < c.ndevs; i++) {
			if (c.devices[i].zoned_model == F2FS_ZONED_NONE)
				continue;
			if (c.zone_blocks &&
				c.zone_blocks != c.devices[i].zone_blocks) {
				MSG(0, "\tError: zones of different size are "
						"not supported\n");
				return -1;
			}
			c.zone_blocks = c.devices[i].zone_blocks;
		}

		/*
		 * Align sections to the device zone size and align F2FS zones
		 * to the device zones. For F2FS_ZONED_HA model without the
		 * BLKZONED feature set at format time, this is only an
		 * optimization as sequential writes will not be enforced.
		 */
		c.segs_per_sec = c.zone_blocks / DEFAULT_BLOCKS_PER_SEGMENT;
		c.secs_per_zone = 1;
	} else {
		if (c.zoned_mode != 0) {
			MSG(0, "\n Error: %s may not be a zoned block device \n",
					c.devices[0].path);
			return -1;
		}
	}

	c.segs_per_zone = c.segs_per_sec * c.secs_per_zone;

	MSG(0, "Info: Segments per section = %d\n", c.segs_per_sec);
	MSG(0, "Info: Sections per zone = %d\n", c.secs_per_zone);
	MSG(0, "Info: sector size = %u\n", c.sector_size);
	MSG(0, "Info: total sectors = %"PRIu64" (%"PRIu64" MB)\n",
			c.total_sectors, (c.total_sectors *
				(c.sector_size >> 9)) >> 11);
	return 0;
}

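/*
 * Size of the extra inode area implied by the highest enabled feature;
 * each feature extends the area up to the field that follows it.
 */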
unsigned int calc_extra_isize(void)
{
	unsigned int size = offsetof(struct f2fs_inode, i_projid);

	if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
		size = offsetof(struct f2fs_inode, i_projid);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
		size = offsetof(struct f2fs_inode, i_inode_checksum);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
		size = offsetof(struct f2fs_inode, i_crtime);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
		size = offsetof(struct f2fs_inode, i_compr_blocks);
	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
		size = offsetof(struct f2fs_inode, i_extra_end);

	return size - F2FS_EXTRA_ISIZE_OFFSET;
}

#define ARRAY_SIZE(array) \
	(sizeof(array) / sizeof(array[0]))

static const struct {
	char *name;
	__u16 encoding_magic;
	__u16 default_flags;

} f2fs_encoding_map[] = {
	{
		.encoding_magic = F2FS_ENC_UTF8_12_1,
		.name = "utf8",
		.default_flags = 0,
	},
};

static const struct enc_flags {
	__u16 flag;
	char *param;
} encoding_flags[] = {
	{ F2FS_ENC_STRICT_MODE_FL, "strict" },
};

/* Return a positive number < 0xff indicating the encoding magic number
 * or a negative value indicating error. */
int f2fs_str2encoding(const char *string)
{
	int i;

	for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
		if (!strcmp(string, f2fs_encoding_map[i].name))
			return f2fs_encoding_map[i].encoding_magic;

	return -EINVAL;
}

int f2fs_get_encoding_flags(int encoding)
{
	int i;

	for (i = 0 ; i < ARRAY_SIZE(f2fs_encoding_map); i++)
		if (f2fs_encoding_map[i].encoding_magic == encoding)
			return f2fs_encoding_map[i].default_flags;

	return 0;
}

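/*
 * Parse a list of encoding flag names, each optionally prefixed with
 * "no" to clear the flag. On an unknown name, *param is pointed at the
 * offending token and -EINVAL is returned.
 */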
int f2fs_str2encoding_flags(char **param, __u16 *flags)
{
	char *f = strtok(*param, ",");
	const struct enc_flags *fl;
	int i, neg = 0;

	while (f) {
		neg = 0;
		if (!strncmp("no", f, 2)) {
			neg = 1;
			f += 2;
		}

		for (i = 0; i < ARRAY_SIZE(encoding_flags); i++) {
			fl = &encoding_flags[i];
			if (!strcmp(fl->param, f)) {
				if (neg) {
					MSG(0, "Sub %s\n", fl->param);
					*flags &= ~fl->flag;
				} else {
					MSG(0, "Add %s\n", fl->param);
					*flags |= fl->flag;
				}

				goto next_flag;
			}
		}
		*param = f;
		return -EINVAL;
	next_flag:
		f = strtok(NULL, ":");
	}
	return 0;
}
