// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/inode_root.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include <linux/fs_stack.h>
#include <linux/mount.h>
#include <linux/namei.h>

#include "authority/authentication.h"
#include "comm/socket_adapter.h"
#include "comm/transport.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_device_view.h"
#include "hmdfs_merge_view.h"
#include "hmdfs_trace.h"

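/*
 * Build (or look up) the root inode of the "local" device view, stacked
 * on @lower_inode. Takes its own reference on @lower_inode via igrab();
 * the reference is dropped again if allocation fails or if an
 * already-initialised inode (!I_NEW) is found in the inode cache.
 */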
static struct inode *fill_device_local_inode(struct super_block *sb,
					     struct inode *lower_inode)
{
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info = NULL;

	if (!igrab(lower_inode))
		return ERR_PTR(-ESTALE);

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_LOCAL, lower_inode,
				       NULL);
	if (!inode) {
		hmdfs_err("iget5_locked get inode NULL");
		iput(lower_inode);
		return ERR_PTR(-ENOMEM);
	}
	if (!(inode->i_state & I_NEW)) {
		iput(lower_inode);
		return inode;
	}

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_SECOND_LOCAL;

	inode->i_mode =
		(lower_inode->i_mode & S_IFMT) | S_IRWXU | S_IRWXG | S_IXOTH;

	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);

	inode->i_atime = lower_inode->i_atime;
	inode->i_ctime = lower_inode->i_ctime;
	inode->i_mtime = lower_inode->i_mtime;

	inode->i_op = &hmdfs_dir_inode_ops_local;
	inode->i_fop = &hmdfs_dir_ops_local;

	fsstack_copy_inode_size(inode, lower_inode);
	unlock_new_inode(inode);
	return inode;
}

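/*
 * Build (or look up) the root inode of a remote device view identified
 * by @dev_id. The remote root is a synthetic directory with fixed
 * ownership and no lower inode; the peer reference taken by
 * hmdfs_lookup_from_devid() is released via peer_put() before returning.
 */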
static struct inode *fill_device_inode_remote(struct super_block *sb,
					      uint64_t dev_id)
{
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info = NULL;
	struct hmdfs_peer *con = NULL;

	con = hmdfs_lookup_from_devid(sb->s_fs_info, dev_id);
	if (!con)
		return ERR_PTR(-ENOENT);

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_REMOTE, NULL, con);
	if (!inode) {
		hmdfs_err("get inode NULL");
		inode = ERR_PTR(-ENOMEM);
		goto out;
	}
	if (!(inode->i_state & I_NEW))
		goto out;

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_SECOND_REMOTE;

	inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH;

	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);
	inode->i_op = &hmdfs_dev_dir_inode_ops_remote;
	inode->i_fop = &hmdfs_dev_dir_ops_remote;

	unlock_new_inode(inode);

out:
	peer_put(con);
	return inode;
}

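/*
 * Build (or look up) the root inode of the cloud device view. Like the
 * remote case this is a synthetic directory with fixed ownership, but
 * it is backed by neither a lower inode nor a peer connection.
 */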
static struct inode *fill_device_inode_cloud(struct super_block *sb)
{
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info = NULL;

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV_CLOUD, NULL, NULL);
	if (!inode) {
		hmdfs_err("get inode NULL");
		inode = ERR_PTR(-ENOMEM);
		goto out;
	}
	if (!(inode->i_state & I_NEW))
		goto out;

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_SECOND_CLOUD;

	inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH;

	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);
	inode->i_op = &hmdfs_dev_dir_inode_ops_cloud;
	inode->i_fop = &hmdfs_dev_dir_ops_cloud;

	unlock_new_inode(inode);

out:
	return inode;
}

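/*
 * ->lookup() for the device-view directory. The child name selects the
 * branch: DEVICE_VIEW_LOCAL stacks on the lower root, DEVICE_VIEW_CLOUD
 * gets a synthetic cloud root, and any other name is treated as a
 * device CID that must match a connected peer (otherwise -ENOENT).
 */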
struct dentry *hmdfs_device_lookup(struct inode *parent_inode,
				   struct dentry *child_dentry,
				   unsigned int flags)
{
	const char *d_name = child_dentry->d_name.name;
	struct inode *root_inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_sb_info *sbi = sb->s_fs_info;
	struct dentry *ret_dentry = NULL;
	int err = 0;
	struct hmdfs_peer *con = NULL;
	struct hmdfs_dentry_info *di = NULL;
	uint8_t *cid = NULL;
	struct path *root_lower_path = NULL;

	trace_hmdfs_device_lookup(parent_inode, child_dentry, flags);
	if (!strncmp(d_name, DEVICE_VIEW_LOCAL,
		     sizeof(DEVICE_VIEW_LOCAL) - 1)) {
		err = init_hmdfs_dentry_info(sbi, child_dentry,
					     HMDFS_LAYER_SECOND_LOCAL);
		if (err) {
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		di = hmdfs_d(sb->s_root);
		root_lower_path = &(di->lower_path);
		hmdfs_set_lower_path(child_dentry, root_lower_path);
		path_get(root_lower_path);
		root_inode = fill_device_local_inode(
			sb, d_inode(root_lower_path->dentry));
		if (IS_ERR(root_inode)) {
			err = PTR_ERR(root_inode);
			ret_dentry = ERR_PTR(err);
			hmdfs_put_reset_lower_path(child_dentry);
			goto out;
		}
		ret_dentry = d_splice_alias(root_inode, child_dentry);
		if (IS_ERR(ret_dentry)) {
			err = PTR_ERR(ret_dentry);
			ret_dentry = ERR_PTR(err);
			hmdfs_put_reset_lower_path(child_dentry);
			goto out;
		}
	} else if (!strncmp(d_name, DEVICE_VIEW_CLOUD,
			    sizeof(DEVICE_VIEW_CLOUD) - 1)) {
		err = init_hmdfs_dentry_info(sbi, child_dentry,
					     HMDFS_LAYER_SECOND_CLOUD);
		if (err) {
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		di = hmdfs_d(sb->s_root);
		root_inode = fill_device_inode_cloud(sb);
		if (IS_ERR(root_inode)) {
			err = PTR_ERR(root_inode);
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		ret_dentry = d_splice_alias(root_inode, child_dentry);
		if (IS_ERR(ret_dentry)) {
			err = PTR_ERR(ret_dentry);
			ret_dentry = ERR_PTR(err);
			goto out;
		}
	} else {
		err = init_hmdfs_dentry_info(sbi, child_dentry,
					     HMDFS_LAYER_SECOND_REMOTE);
		di = hmdfs_d(child_dentry);
		if (err) {
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		cid = kzalloc(HMDFS_CID_SIZE + 1, GFP_KERNEL);
		if (!cid) {
			err = -ENOMEM;
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		strncpy(cid, d_name, HMDFS_CID_SIZE);
		cid[HMDFS_CID_SIZE] = '\0';
		con = hmdfs_lookup_from_cid(sbi, cid);
		if (!con) {
			kfree(cid);
			err = -ENOENT;
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		di->device_id = con->device_id;
		root_inode = fill_device_inode_remote(sb, di->device_id);
		if (IS_ERR(root_inode)) {
			kfree(cid);
			err = PTR_ERR(root_inode);
			ret_dentry = ERR_PTR(err);
			goto out;
		}
		ret_dentry = d_splice_alias(root_inode, child_dentry);
		kfree(cid);
	}
	if (root_inode)
		hmdfs_root_inode_perm_init(root_inode);
	if (!err)
		hmdfs_set_time(child_dentry, jiffies);
out:
	if (con)
		peer_put(con);
	trace_hmdfs_device_lookup_end(parent_inode, child_dentry, err);
	return ret_dentry;
}

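/*
 * ->lookup() for the hmdfs root directory. Resolves the top-level view
 * entries: the merge and cloud-merge views (only when
 * sbi->s_merge_switch is enabled) and the device view, whose root is
 * stacked on the path named by sbi->local_src.
 */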
struct dentry *hmdfs_root_lookup(struct inode *parent_inode,
				 struct dentry *child_dentry,
				 unsigned int flags)
{
	const char *d_name = child_dentry->d_name.name;
	struct inode *root_inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_sb_info *sbi = sb->s_fs_info;
	struct dentry *ret = ERR_PTR(-ENOENT);
	struct path root_path;

	trace_hmdfs_root_lookup(parent_inode, child_dentry, flags);
	if (sbi->s_merge_switch && !strcmp(d_name, MERGE_VIEW_ROOT)) {
		ret = hmdfs_lookup_merge(parent_inode, child_dentry, flags);
		if (ret && !IS_ERR(ret))
			child_dentry = ret;
		root_inode = d_inode(child_dentry);
	} else if (sbi->s_merge_switch &&
		   !strcmp(d_name, CLOUD_MERGE_VIEW_ROOT)) {
		ret = hmdfs_lookup_cloud_merge(parent_inode, child_dentry,
					       flags);
		if (ret && !IS_ERR(ret))
			child_dentry = ret;
		root_inode = d_inode(child_dentry);
	} else if (!strcmp(d_name, DEVICE_VIEW_ROOT)) {
		ret = ERR_PTR(init_hmdfs_dentry_info(
			sbi, child_dentry, HMDFS_LAYER_FIRST_DEVICE));
		if (IS_ERR(ret))
			goto out;
		ret = ERR_PTR(kern_path(sbi->local_src, 0, &root_path));
		if (IS_ERR(ret))
			goto out;
		root_inode = fill_device_inode(sb, d_inode(root_path.dentry));
		ret = d_splice_alias(root_inode, child_dentry);
		path_put(&root_path);
	}
	if (!IS_ERR(ret) && root_inode)
		hmdfs_root_inode_perm_init(root_inode);

out:
	trace_hmdfs_root_lookup_end(parent_inode, child_dentry,
				    PTR_ERR_OR_ZERO(ret));
	return ret;
}

const struct inode_operations hmdfs_device_ops = {
	.lookup = hmdfs_device_lookup,
};

const struct inode_operations hmdfs_root_ops = {
	.lookup = hmdfs_root_lookup,
};

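/*
 * Build (or look up) the root inode of the device view itself. Unlike
 * fill_device_local_inode(), no extra reference on @lower_inode is
 * taken here: the caller pins it through the path it resolved, and it
 * is only used to copy mode, timestamps and size.
 */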
struct inode *fill_device_inode(struct super_block *sb,
				struct inode *lower_inode)
{
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info = NULL;

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_DEV, NULL, NULL);
	if (!inode) {
		hmdfs_err("iget5_locked get inode NULL");
		return ERR_PTR(-ENOMEM);
	}
	if (!(inode->i_state & I_NEW))
		return inode;

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_FIRST_DEVICE;

	inode->i_atime = lower_inode->i_atime;
	inode->i_ctime = lower_inode->i_ctime;
	inode->i_mtime = lower_inode->i_mtime;

	inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR |
			S_IRGRP | S_IXGRP | S_IXOTH;
	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);
	inode->i_op = &hmdfs_device_ops;
	inode->i_fop = &hmdfs_device_fops;

	fsstack_copy_inode_size(inode, lower_inode);
	unlock_new_inode(inode);
	return inode;
}

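/*
 * Build (or look up) the root inode of the whole hmdfs mount, stacked
 * on the lower filesystem root. Holds an igrab() reference on
 * @lower_inode, dropped on failure or when an existing (!I_NEW) inode
 * is returned from the cache.
 */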
struct inode *fill_root_inode(struct super_block *sb, struct inode *lower_inode)
{
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info = NULL;

	if (!igrab(lower_inode))
		return ERR_PTR(-ESTALE);

	inode = hmdfs_iget_locked_root(sb, HMDFS_ROOT_ANCESTOR, lower_inode,
				       NULL);
	if (!inode) {
		hmdfs_err("iget5_locked get inode NULL");
		iput(lower_inode);
		return ERR_PTR(-ENOMEM);
	}
	if (!(inode->i_state & I_NEW)) {
		iput(lower_inode);
		return inode;
	}

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_ZERO;
	inode->i_mode = (lower_inode->i_mode & S_IFMT) | S_IRUSR | S_IXUSR |
			S_IRGRP | S_IXGRP | S_IXOTH;

#ifdef CONFIG_HMDFS_FS_PERMISSION
	inode->i_uid = lower_inode->i_uid;
	inode->i_gid = lower_inode->i_gid;
#else
	inode->i_uid = KUIDT_INIT((uid_t)1000);
	inode->i_gid = KGIDT_INIT((gid_t)1000);
#endif
	inode->i_atime = lower_inode->i_atime;
	inode->i_ctime = lower_inode->i_ctime;
	inode->i_mtime = lower_inode->i_mtime;

	inode->i_op = &hmdfs_root_ops;
	inode->i_fop = &hmdfs_root_fops;
	fsstack_copy_inode_size(inode, lower_inode);
	unlock_new_inode(inode);
	return inode;
}