/**
 * node.h
 *
 * Many parts of this code are copied from the Linux kernel's fs/f2fs.
 *
 * Copyright (C) 2015 Huawei Ltd.
 * Written by:
 *   Hou Pengyang <houpengyang@huawei.com>
 *   Liu Shuoran <liushuoran@huawei.com>
 *   Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _NODE_H_
#define _NODE_H_

#include "fsck.h"

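/*
 * A node block describes an inode when the nid recorded in its footer is
 * the same as the inode number that owns it.
 */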
static inline int IS_INODE(struct f2fs_node *node)
{
	return ((node)->footer.nid == (node)->footer.ino);
}

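/*
 * Number of data block address slots carried by @node_blk.  Inode blocks
 * use ADDRS_PER_INODE(); for a direct node block the owning inode is
 * consulted (read from disk when @inode_blk is NULL) so that
 * ADDRS_PER_BLOCK() is evaluated against the right inode.
 */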
static inline unsigned int ADDRS_PER_PAGE(struct f2fs_sb_info *sbi,
		struct f2fs_node *node_blk, struct f2fs_node *inode_blk)
{
	nid_t ino = le32_to_cpu(node_blk->footer.ino);
	unsigned int nblocks;

	if (IS_INODE(node_blk))
		return ADDRS_PER_INODE(&node_blk->i);

	if (!inode_blk) {
		struct node_info ni;

		inode_blk = calloc(BLOCK_SZ, 2);
		ASSERT(inode_blk);

		get_node_info(sbi, ino, &ni);
		ASSERT(dev_read_block(inode_blk, ni.blk_addr) >= 0);
		nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
		free(inode_blk);
	} else {
		nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
	}
	return nblocks;
}

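/*
 * First data block address slot inside an inode; i_addr is offset past the
 * extra attribute area when the inode carries one.
 */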
static inline __le32 *blkaddr_in_inode(struct f2fs_node *node)
{
	return node->i.i_addr + get_extra_isize(node);
}

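/*
 * Address array of any node block: i_addr[] for inodes, dn.addr[] for
 * direct node blocks.
 */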
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return IS_INODE(node) ? blkaddr_in_inode(node) : node->dn.addr;
}

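/* Data block address stored at @offset in @node_page's address array. */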
static inline block_t datablock_addr(struct f2fs_node *node_page,
					unsigned int offset)
{
	__le32 *addr_array;

	ASSERT(node_page);
	addr_array = blkaddr_in_node(node_page);
	return le32_to_cpu(addr_array[offset]);
}

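/*
 * set_nid()/get_nid() store and fetch node ids kept in a node block.  When
 * @i is non-zero, @rn is an inode and @off is an absolute node offset, so
 * it is rebased against NODE_DIR1_BLOCK to index i_nid[]; otherwise @rn is
 * an (indirect) node block and @off indexes in.nid[] directly.  A caller
 * walking an inode would use something like
 * get_nid(node_blk, NODE_IND1_BLOCK, 1) to read the first indirect nid.
 */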
static inline void set_nid(struct f2fs_node *rn, int off, nid_t nid, int i)
{
	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
}

static inline nid_t get_nid(struct f2fs_node *rn, int off, int i)
{
	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	else
		return le32_to_cpu(rn->in.nid[off]);
}

enum {
	ALLOC_NODE,	/* allocate a new node page if needed */
	LOOKUP_NODE,	/* look up a node without readahead */
	LOOKUP_NODE_RA,	/* look up a node with readahead */
};

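/*
 * Reset @dn and attach the given inode block, node block and nid; both
 * dirty flags start cleared.
 */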
static inline void set_new_dnode(struct dnode_of_data *dn,
		struct f2fs_node *iblk, struct f2fs_node *nblk, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode_blk = iblk;
	dn->node_blk = nblk;
	dn->nid = nid;
	dn->idirty = 0;
	dn->ndirty = 0;
}

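/* Bump i_blocks in the cached inode block and mark the inode dirty. */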
static inline void inc_inode_blocks(struct dnode_of_data *dn)
{
	u64 blocks = le64_to_cpu(dn->inode_blk->i.i_blocks);

	dn->inode_blk->i.i_blocks = cpu_to_le64(blocks + 1);
	dn->idirty = 1;
}

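/*
 * A node page is a direct node (it holds data block addresses) unless its
 * offset within the inode marks it as an indirect node: offset 3 is the
 * first indirect node, 4 + NIDS_PER_BLOCK the second, 5 + 2 *
 * NIDS_PER_BLOCK the double indirect node, and past that every
 * (NIDS_PER_BLOCK + 1)-th page starts another indirect node.
 */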
static inline int IS_DNODE(struct f2fs_node *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return 0;

	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return 0;
	}
	return 1;
}

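/*
 * Footer accessors: the inode number that owns this node block and the
 * checkpoint version it was written under.
 */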
static inline nid_t ino_of_node(struct f2fs_node *node_blk)
{
	return le32_to_cpu(node_blk->footer.ino);
}

static inline __u64 cpver_of_node(struct f2fs_node *node_blk)
{
	return le64_to_cpu(node_blk->footer.cp_ver);
}

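/*
 * A fsynced dnode is recoverable only if its footer checkpoint version
 * matches the current checkpoint.  With CP_CRC_RECOVERY_FLAG the checkpoint
 * CRC is folded into the upper 32 bits before comparing; with
 * CP_NOCRC_RECOVERY_FLAG only the lower 32 bits (the version itself) are
 * compared.
 */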
static inline bool is_recoverable_dnode(struct f2fs_sb_info *sbi,
						struct f2fs_node *node_blk)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	__u64 cp_ver = cur_cp_version(ckpt);

	/* Ignore the CRC part if fsck.f2fs has set CP_NOCRC_RECOVERY_FLAG. */
	if (is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(node_blk) << 32);

	if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(node_blk);
}

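/*
 * Block address of the next node block written in the same node log,
 * which roll-forward recovery follows.
 */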
static inline block_t next_blkaddr_of_node(struct f2fs_node *node_blk)
{
	return le32_to_cpu(node_blk->footer.next_blkaddr);
}

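/* Test a single footer flag bit such as FSYNC_BIT_SHIFT or DENT_BIT_SHIFT. */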
static inline int is_node(struct f2fs_node *node_blk, int type)
{
	return le32_to_cpu(node_blk->footer.flag) & (1 << type);
}

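/*
 * Directory nodes are kept hot; everything else is marked cold via
 * COLD_BIT_SHIFT in the footer flags.
 */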
static inline void set_cold_node(struct f2fs_node *rn, bool is_dir)
{
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (is_dir)
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

#define is_fsync_dnode(node_blk)	is_node(node_blk, FSYNC_BIT_SHIFT)
#define is_dent_dnode(node_blk)		is_node(node_blk, DENT_BIT_SHIFT)

#endif /* _NODE_H_ */