// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/inode_root.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include <linux/fs_stack.h>
#include <linux/mount.h>
#include <linux/namei.h>

#include "authority/authentication.h"
#include "comm/socket_adapter.h"
#include "comm/transport.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_device_view.h"
#include "hmdfs_merge_view.h"
#include "hmdfs_trace.h"

fill_device_local_inode(struct super_block * sb,struct inode * lower_inode)20 static struct inode *fill_device_local_inode(struct super_block *sb,
21 					     struct inode *lower_inode)
22 {
23 	struct inode *inode = NULL;
24 	struct hmdfs_inode_info *info = NULL;
25 
26 	if (!igrab(lower_inode))
27 		return ERR_PTR(-ESTALE);
28 
29 	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_LOCAL, lower_inode,
30 				       NULL);
31 	if (!inode) {
32 		hmdfs_err("iget5_locked get inode NULL");
33 		iput(lower_inode);
34 		return ERR_PTR(-ENOMEM);
35 	}
36 	if (!(inode->i_state & I_NEW)) {
37 		iput(lower_inode);
38 		return inode;
39 	}
40 
41 	info = hmdfs_i(inode);
42 	info->inode_type = HMDFS_LAYER_SECOND_LOCAL;
43 
44 	inode->i_mode =
45 		(lower_inode->i_mode & S_IFMT) | S_IRWXU | S_IRWXG | S_IXOTH;
46 
47 	inode->i_uid = KUIDT_INIT((uid_t)1000);
48 	inode->i_gid = KGIDT_INIT((gid_t)1000);
49 
50 	inode->i_atime = lower_inode->i_atime;
51 	inode->i_ctime = lower_inode->i_ctime;
52 	inode->i_mtime = lower_inode->i_mtime;
53 
54 	inode->i_op = &hmdfs_dir_inode_ops_local;
55 	inode->i_fop = &hmdfs_dir_ops_local;
56 
57 	fsstack_copy_inode_size(inode, lower_inode);
58 	unlock_new_inode(inode);
59 	return inode;
60 }
61 
/*
 * Obtain the root inode of a remote peer's device view, creating and
 * initializing it on first use.
 *
 * The peer reference taken by hmdfs_lookup_from_devid() is released on
 * all paths before returning; the inode keeps whatever reference
 * hmdfs_iget_locked_root() established internally.
 *
 * Returns the inode on success, ERR_PTR(-ENOENT) if no peer matches
 * @dev_id, or ERR_PTR(-ENOMEM) if inode allocation fails.
 */
static struct inode *fill_device_inode_remote(struct super_block *sb,
					      uint64_t dev_id)
{
	struct hmdfs_inode_info *hii;
	struct hmdfs_peer *peer;
	struct inode *inode;

	peer = hmdfs_lookup_from_devid(sb->s_fs_info, dev_id);
	if (!peer)
		return ERR_PTR(-ENOENT);

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_REMOTE, NULL, peer);
	if (!inode) {
		hmdfs_err("get inode NULL");
		inode = ERR_PTR(-ENOMEM);
		goto put_peer;
	}
	/* Cached inode found: it is already fully initialized. */
	if (!(inode->i_state & I_NEW))
		goto put_peer;

	hii = hmdfs_i(inode);
	hii->inode_type = HMDFS_LAYER_SECOND_REMOTE;

	/* Remote device root is a directory: rwx owner/group, x other. */
	inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH;
	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);

	inode->i_fop = &hmdfs_dev_dir_ops_remote;
	inode->i_op = &hmdfs_dev_dir_inode_ops_remote;

	unlock_new_inode(inode);

put_peer:
	peer_put(peer);
	return inode;
}
98 
/*
 * ->lookup for the first-layer "device_view" directory.
 *
 * A child named DEVICE_VIEW_LOCAL resolves to the local device root,
 * backed by the superblock root's lower path; any other name is treated
 * as a remote peer's CID string and resolves to that peer's remote
 * device root.
 *
 * Returns the dentry produced by d_splice_alias() (which may be NULL
 * when @child_dentry itself was used) or an ERR_PTR() on failure.
 */
struct dentry *hmdfs_device_lookup(struct inode *parent_inode,
				   struct dentry *child_dentry,
				   unsigned int flags)
{
	const char *d_name = child_dentry->d_name.name;
	struct inode *root_inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_sb_info *sbi = sb->s_fs_info;
	struct dentry *ret_dentry = NULL;
	int err = 0;
	struct hmdfs_peer *con = NULL;
	struct hmdfs_dentry_info *di = NULL;
	uint8_t *cid = NULL;
	struct path *root_lower_path = NULL;

	trace_hmdfs_device_lookup(parent_inode, child_dentry, flags);
	if (!strncmp(d_name, DEVICE_VIEW_LOCAL,
		     sizeof(DEVICE_VIEW_LOCAL) - 1)) {
		/* Local view: share the lower path of the hmdfs root. */
		err = init_hmdfs_dentry_info(sbi, child_dentry,
					     HMDFS_LAYER_SECOND_LOCAL);
		if (err) {
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		di = hmdfs_d(sb->s_root);
		root_lower_path = &(di->lower_path);
		/* The extra path reference is owned by child_dentry now. */
		hmdfs_set_lower_path(child_dentry, root_lower_path);
		path_get(root_lower_path);
		root_inode = fill_device_local_inode(
			sb, d_inode(root_lower_path->dentry));
		if (IS_ERR(root_inode)) {
			err = PTR_ERR(root_inode);
			ret_dentry = ERR_PTR(err);
			/* Undo the lower-path attachment on failure. */
			hmdfs_put_reset_lower_path(child_dentry);
			goto out;
		}
		ret_dentry = d_splice_alias(root_inode, child_dentry);
		if (IS_ERR(ret_dentry)) {
			err = PTR_ERR(ret_dentry);
			ret_dentry = ERR_PTR(err);
			hmdfs_put_reset_lower_path(child_dentry);
			goto out;
		}
	} else {
		/* Remote view: interpret the name as a peer CID. */
		err = init_hmdfs_dentry_info(sbi, child_dentry,
					     HMDFS_LAYER_SECOND_REMOTE);
		di = hmdfs_d(child_dentry);
		if (err) {
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		/* +1 so the copied CID is always NUL-terminated. */
		cid = kzalloc(HMDFS_CID_SIZE + 1, GFP_KERNEL);
		if (!cid) {
			err = -ENOMEM;
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		strncpy(cid, d_name, HMDFS_CID_SIZE);
		cid[HMDFS_CID_SIZE] = '\0';
		/* Takes a peer reference; dropped at "out" via peer_put. */
		con = hmdfs_lookup_from_cid(sbi, cid);
		if (!con) {
			kfree(cid);
			err = -ENOENT;
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		di->device_id = con->device_id;
		root_inode = fill_device_inode_remote(sb, di->device_id);
		if (IS_ERR(root_inode)) {
			kfree(cid);
			err = PTR_ERR(root_inode);
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		ret_dentry = d_splice_alias(root_inode, child_dentry);
		kfree(cid);
	}
	/* Error paths jumped to "out", so root_inode is valid here. */
	if (root_inode)
		hmdfs_root_inode_perm_init(root_inode);
	if (!err)
		hmdfs_set_time(child_dentry, jiffies);
out:
	if (con)
		peer_put(con);
	trace_hmdfs_device_lookup_end(parent_inode, child_dentry, err);
	return ret_dentry;
}
186 
/*
 * ->lookup for the hmdfs mount root.
 *
 * Two names are recognized: MERGE_VIEW_ROOT (only when the merge-view
 * switch is enabled), delegated to hmdfs_lookup_merge(), and
 * DEVICE_VIEW_ROOT, whose inode is built from the local source path.
 * Anything else keeps the initial -ENOENT result.
 *
 * Returns the looked-up dentry or an ERR_PTR().
 */
struct dentry *hmdfs_root_lookup(struct inode *parent_inode,
				 struct dentry *child_dentry,
				 unsigned int flags)
{
	const char *d_name = child_dentry->d_name.name;
	struct inode *root_inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_sb_info *sbi = sb->s_fs_info;
	struct dentry *ret = ERR_PTR(-ENOENT);
	struct path root_path;

	trace_hmdfs_root_lookup(parent_inode, child_dentry, flags);
	if (sbi->s_merge_switch && !strcmp(d_name, MERGE_VIEW_ROOT)) {
		ret = hmdfs_lookup_merge(parent_inode, child_dentry, flags);
		/* d_splice_alias may have returned a different dentry. */
		if (ret && !IS_ERR(ret))
			child_dentry = ret;
		root_inode = d_inode(child_dentry);
	} else if (!strcmp(d_name, DEVICE_VIEW_ROOT)) {
		ret = ERR_PTR(init_hmdfs_dentry_info(
			sbi, child_dentry, HMDFS_LAYER_FIRST_DEVICE));
		if (IS_ERR(ret))
			goto out;
		ret = ERR_PTR(kern_path(sbi->local_src, 0, &root_path));
		if (IS_ERR(ret))
			goto out;
		/* d_splice_alias handles an ERR_PTR inode itself. */
		root_inode = fill_device_inode(sb, d_inode(root_path.dentry));
		ret = d_splice_alias(root_inode, child_dentry);
		path_put(&root_path);
	}
	if (!IS_ERR(ret) && root_inode)
		hmdfs_root_inode_perm_init(root_inode);

out:
	trace_hmdfs_root_lookup_end(parent_inode, child_dentry,
				    PTR_ERR_OR_ZERO(ret));
	return ret;
}
224 
/* Inode operations for the device_view directory level. */
const struct inode_operations hmdfs_device_ops = {
	.lookup = hmdfs_device_lookup,
};
228 
/* Inode operations for the hmdfs mount root. */
const struct inode_operations hmdfs_root_ops = {
	.lookup = hmdfs_root_lookup,
};
232 
fill_device_inode(struct super_block * sb,struct inode * lower_inode)233 struct inode *fill_device_inode(struct super_block *sb,
234 				struct inode *lower_inode)
235 {
236 	struct inode *inode = NULL;
237 	struct hmdfs_inode_info *info = NULL;
238 
239 	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV, NULL, NULL);
240 	if (!inode) {
241 		hmdfs_err("iget5_locked get inode NULL");
242 		return ERR_PTR(-ENOMEM);
243 	}
244 	if (!(inode->i_state & I_NEW))
245 		return inode;
246 
247 	info = hmdfs_i(inode);
248 	info->inode_type = HMDFS_LAYER_FIRST_DEVICE;
249 
250 	inode->i_atime = lower_inode->i_atime;
251 	inode->i_ctime = lower_inode->i_ctime;
252 	inode->i_mtime = lower_inode->i_mtime;
253 
254 	inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR |
255 			S_IRGRP | S_IXGRP | S_IXOTH;
256 	inode->i_uid = KUIDT_INIT((uid_t)1000);
257 	inode->i_gid = KGIDT_INIT((gid_t)1000);
258 	inode->i_op = &hmdfs_device_ops;
259 	inode->i_fop = &hmdfs_device_fops;
260 
261 	fsstack_copy_inode_size(inode, lower_inode);
262 	unlock_new_inode(inode);
263 	return inode;
264 }
265 
fill_root_inode(struct super_block * sb,struct inode * lower_inode)266 struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode)
267 {
268 	struct inode *inode = NULL;
269 	struct hmdfs_inode_info *info = NULL;
270 
271 	if (!igrab(lower_inode))
272 		return ERR_PTR(-ESTALE);
273 
274 	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_ANCESTOR, lower_inode,
275 				       NULL);
276 	if (!inode) {
277 		hmdfs_err("iget5_locked get inode NULL");
278 		iput(lower_inode);
279 		return ERR_PTR(-ENOMEM);
280 	}
281 	if (!(inode->i_state & I_NEW)) {
282 		iput(lower_inode);
283 		return inode;
284 	}
285 
286 	info = hmdfs_i(inode);
287 	info->inode_type = HMDFS_LAYER_ZERO;
288 	inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR |
289 			S_IRGRP | S_IXGRP | S_IXOTH;
290 
291 #ifdef CONFIG_HMDFS_FS_PERMISSION
292 	inode->i_uid = lower_inode->i_uid;
293 	inode->i_gid = lower_inode->i_gid;
294 #else
295 	inode->i_uid = KUIDT_INIT((uid_t)1000);
296 	inode->i_gid = KGIDT_INIT((gid_t)1000);
297 #endif
298 	inode->i_atime = lower_inode->i_atime;
299 	inode->i_ctime = lower_inode->i_ctime;
300 	inode->i_mtime = lower_inode->i_mtime;
301 
302 	inode->i_op = &hmdfs_root_ops;
303 	inode->i_fop = &hmdfs_root_fops;
304 	fsstack_copy_inode_size(inode, lower_inode);
305 	unlock_new_inode(inode);
306 	return inode;
307 }
308