1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
 * fs/hmdfs/hmdfs_share.c
4 *
5 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
6 */
7
8 #include "hmdfs_share.h"
9
hmdfs_is_dst_path(struct path * src,struct path * dst)10 static inline bool hmdfs_is_dst_path(struct path *src, struct path *dst)
11 {
12 return (src->dentry == dst->dentry) && (src->mnt == dst->mnt);
13 }
14
is_dst_device(char * src_cid,char * dst_cid)15 static inline bool is_dst_device(char *src_cid, char *dst_cid)
16 {
17 return strncmp(src_cid, dst_cid, HMDFS_CID_SIZE) == 0;
18 }
19
hmdfs_is_share_file(struct file * file)20 bool hmdfs_is_share_file(struct file *file)
21 {
22 struct file *cur_file = file;
23 struct hmdfs_dentry_info *gdi;
24 struct hmdfs_file_info *gfi;
25
26 while (cur_file->f_inode->i_sb->s_magic == HMDFS_SUPER_MAGIC) {
27 gdi = hmdfs_d(cur_file->f_path.dentry);
28 gfi = hmdfs_f(cur_file);
29 if (hm_isshare(gdi->file_type))
30 return true;
31 if (gfi->lower_file)
32 cur_file = gfi->lower_file;
33 else
34 break;
35 }
36
37 return false;
38 }
39
/*
 * Unlink @item from its share table, drop the file reference it holds,
 * and free its memory.  Caller must hold the table's item_list_lock.
 *
 * NOTE(review): all current callers invoke this under a spinlock;
 * confirm fput() is safe in that context on this kernel.
 */
static void remove_and_release_share_item(struct hmdfs_share_item *item)
{
	list_del(&item->list);
	item->hst->item_cnt--;
	fput(item->file);
	kfree(item->relative_path.name);
	kfree(item);
}
48
is_share_item_timeout(struct hmdfs_share_item * item)49 static inline bool is_share_item_timeout(struct hmdfs_share_item *item)
50 {
51 return !item->opened && item->timeout;
52 }
53
hmdfs_lookup_share_item(struct hmdfs_share_table * st,struct qstr * cur_relative_path)54 struct hmdfs_share_item *hmdfs_lookup_share_item(struct hmdfs_share_table *st,
55 struct qstr *cur_relative_path)
56 {
57 struct hmdfs_share_item *item, *tmp;
58
59 list_for_each_entry_safe(item, tmp, &st->item_list_head, list) {
60 if (is_share_item_timeout(item)){
61 remove_and_release_share_item(item);
62 } else {
63 if (qstr_eq(&item->relative_path, cur_relative_path))
64 return item;
65 }
66 }
67
68 return NULL;
69 }
70
share_item_timeout_work(struct work_struct * work)71 static void share_item_timeout_work(struct work_struct *work) {
72 struct hmdfs_share_item *item =
73 container_of(work, struct hmdfs_share_item, d_work.work);
74
75 item->timeout = true;
76 }
77
insert_share_item(struct hmdfs_share_table * st,struct qstr * relative_path,struct file * file,char * cid)78 int insert_share_item(struct hmdfs_share_table *st, struct qstr *relative_path,
79 struct file *file, char *cid)
80 {
81 struct hmdfs_share_item *new_item = NULL;
82 char *path_name;
83 int err = 0;
84
85 if (st->item_cnt >= st->max_cnt) {
86 int ret = hmdfs_clear_first_item(st);
87 if (unlikely(ret)) {
88 err = -EMFILE;
89 goto err_out;
90 }
91 }
92
93 path_name = kzalloc(PATH_MAX, GFP_KERNEL);
94 if (unlikely(!path_name)) {
95 err = -EMFILE;
96 goto err_out;
97 }
98 strcpy(path_name, relative_path->name);
99
100 new_item = kmalloc(sizeof(*new_item), GFP_KERNEL);
101 if (unlikely(!new_item)) {
102 err = -ENOMEM;
103 kfree(path_name);
104 goto err_out;
105 }
106
107 new_item->file = file;
108 get_file(file);
109 new_item->relative_path.name = path_name;
110 new_item->relative_path.len = relative_path->len;
111 memcpy(new_item->cid, cid, HMDFS_CID_SIZE);
112 new_item->opened = false;
113 new_item->timeout = false;
114 list_add_tail(&new_item->list, &st->item_list_head);
115 new_item->hst = st;
116
117 INIT_DELAYED_WORK(&new_item->d_work, share_item_timeout_work);
118 queue_delayed_work(new_item->hst->share_item_timeout_wq,
119 &new_item->d_work, HZ * HMDFS_SHARE_ITEM_TIMEOUT_S);
120
121 st->item_cnt++;
122
123 err_out:
124 return err;
125 }
126
/*
 * Refresh an existing share item with a (possibly new) struct file and
 * the sharing peer's cid, then restart its expiration timer.
 */
void update_share_item(struct hmdfs_share_item *item, struct file *file,
		       char *cid)
{
	/* if not the same file, we need to update struct file */
	if (!hmdfs_is_dst_path(&file->f_path, &item->file->f_path)) {
		fput(item->file);
		get_file(file);
		item->file = file;
	}
	memcpy(item->cid, cid, HMDFS_CID_SIZE);

	/*
	 * If cancel returns false the timeout work already ran and set
	 * item->timeout, so clear it before re-arming the timer.
	 * NOTE(review): cancel_delayed_work_sync() may sleep — confirm
	 * callers do not hold the table spinlock here.
	 */
	if (!cancel_delayed_work_sync(&item->d_work))
		item->timeout = false;

	queue_delayed_work(item->hst->share_item_timeout_wq, &item->d_work,
			   HZ * HMDFS_SHARE_ITEM_TIMEOUT_S);
}
144
in_share_dir(struct dentry * child_dentry)145 bool in_share_dir(struct dentry *child_dentry)
146 {
147 struct dentry *parent_dentry = dget_parent(child_dentry);
148 bool ret = false;
149
150 if (!strncmp(parent_dentry->d_name.name, SHARE_RESERVED_DIR,
151 strlen(SHARE_RESERVED_DIR)))
152 ret = true;
153
154 dput(parent_dentry);
155 return ret;
156 }
157
is_share_dir(struct inode * inode,const char * name)158 inline bool is_share_dir(struct inode *inode, const char *name)
159 {
160 return (S_ISDIR(inode->i_mode) &&
161 !strncmp(name, SHARE_RESERVED_DIR, strlen(SHARE_RESERVED_DIR)));
162 }
163
get_path_from_share_table(struct hmdfs_sb_info * sbi,struct dentry * cur_dentry,struct path * src_path)164 int get_path_from_share_table(struct hmdfs_sb_info *sbi,
165 struct dentry *cur_dentry,
166 struct path *src_path)
167 {
168 struct hmdfs_share_item *item;
169 const char *path_name;
170 struct qstr relative_path;
171 int err = 0;
172
173 path_name = hmdfs_get_dentry_relative_path(cur_dentry);
174 if (unlikely(!path_name)) {
175 err = -ENOMEM;
176 goto err_out;
177 }
178 relative_path.name = path_name;
179 relative_path.len = strlen(path_name);
180
181 spin_lock(&sbi->share_table.item_list_lock);
182 item = hmdfs_lookup_share_item(&sbi->share_table, &relative_path);
183 if (!item) {
184 err = -ENOENT;
185 goto unlock;
186 }
187 path_get(&item->file->f_path);
188 *src_path = item->file->f_path;
189 unlock:
190 spin_unlock(&sbi->share_table.item_list_lock);
191 kfree(path_name);
192 err_out:
193 return err;
194 }
195
hmdfs_clear_share_item_offline(struct hmdfs_peer * conn)196 void hmdfs_clear_share_item_offline(struct hmdfs_peer *conn)
197 {
198 struct hmdfs_sb_info *sbi = conn->sbi;
199 struct hmdfs_share_item *item, *tmp;
200
201 spin_lock(&sbi->share_table.item_list_lock);
202 list_for_each_entry_safe(item, tmp, &sbi->share_table.item_list_head,
203 list) {
204 if (is_dst_device(item->cid, conn->cid)) {
205 /* release the item that was not closed properly */
206 if (item->opened)
207 remove_and_release_share_item(item);
208 }
209 }
210 spin_unlock(&sbi->share_table.item_list_lock);
211 }
212
reset_item_opened_status(struct hmdfs_sb_info * sbi,const char * filename)213 void reset_item_opened_status(struct hmdfs_sb_info *sbi, const char *filename)
214 {
215 struct qstr candidate = QSTR_INIT(filename, strlen(filename));
216 struct hmdfs_share_item *item = NULL;
217
218 spin_lock(&sbi->share_table.item_list_lock);
219 item = hmdfs_lookup_share_item(&sbi->share_table, &candidate);
220 if (item) {
221 item->opened = false;
222 queue_delayed_work(item->hst->share_item_timeout_wq,
223 &item->d_work, HZ * HMDFS_SHARE_ITEM_TIMEOUT_S);
224 }
225 spin_unlock(&sbi->share_table.item_list_lock);
226 }
227
/*
 * Handle a remote close of a shared file: locate the item by @file's
 * relative path, verify the closing device @cid is entitled to it (an
 * all-device share skips the cid check), then mark the item closed and
 * restart its expiration timer.
 */
void hmdfs_close_share_item(struct hmdfs_sb_info *sbi, struct file *file,
			    char *cid)
{
	struct qstr relativepath;
	const char *path_name;
	struct hmdfs_share_item *item = NULL;

	path_name = hmdfs_get_dentry_relative_path(file->f_path.dentry);
	if (unlikely(!path_name)) {
		hmdfs_err("get dentry relative path error");
		return;
	}

	relativepath.name = path_name;
	relativepath.len = strlen(path_name);

	spin_lock(&sbi->share_table.item_list_lock);
	item = hmdfs_lookup_share_item(&sbi->share_table, &relativepath);
	if (unlikely(!item)) {
		hmdfs_err("cannot get share item %s", relativepath.name);
		goto unlock;
	}

	/*
	 * If the item is shared to all device, we should close the item directly.
	 */
	if (!strcmp(item->cid, SHARE_ALL_DEVICE)) {
		goto close;
	}

	/* A cid mismatch means this device may not close the item. */
	if (unlikely(!is_dst_device(item->cid, cid))) {
		hmdfs_err("item not right, dst cid is: %s", item->cid);
		goto unlock;
	}

	/*
	 * After remote close, we should reset the opened status and restart
	 * delayed timeout work.
	 */
close:
	item->opened = false;
	queue_delayed_work(item->hst->share_item_timeout_wq, &item->d_work,
			   HZ * HMDFS_SHARE_ITEM_TIMEOUT_S);

unlock:
	spin_unlock(&sbi->share_table.item_list_lock);
	kfree(path_name);
}
276
hmdfs_check_share_access_permission(struct hmdfs_sb_info * sbi,const char * filename,char * cid)277 int hmdfs_check_share_access_permission(struct hmdfs_sb_info *sbi,
278 const char *filename,
279 char *cid)
280 {
281 struct qstr candidate = QSTR_INIT(filename, strlen(filename));
282 struct hmdfs_share_item *item = NULL;
283 int ret = -ENOENT;
284
285 spin_lock(&sbi->share_table.item_list_lock);
286 item = hmdfs_lookup_share_item(&sbi->share_table, &candidate);
287 /*
288 * When cid matches, we set the item status opened and canel
289 * its delayed work to ensure that the open process can get
290 * the correct path
291 */
292 if (item && (is_dst_device(item->cid, cid) || !strcmp(item->cid, SHARE_ALL_DEVICE))) {
293 item->opened = true;
294 if (!cancel_delayed_work_sync(&item->d_work)) {
295 item->timeout = false;
296 }
297 ret = 0;
298 }
299 spin_unlock(&sbi->share_table.item_list_lock);
300
301 return ret;
302 }
303
304
hmdfs_init_share_table(struct hmdfs_sb_info * sbi)305 int hmdfs_init_share_table(struct hmdfs_sb_info *sbi)
306 {
307 spin_lock_init(&sbi->share_table.item_list_lock);
308 INIT_LIST_HEAD(&sbi->share_table.item_list_head);
309 sbi->share_table.item_cnt = 0;
310 sbi->share_table.max_cnt = HMDFS_SHARE_ITEMS_MAX;
311 sbi->share_table.share_item_timeout_wq =
312 create_singlethread_workqueue("share_item_timeout_wq");
313
314 if (!sbi->share_table.share_item_timeout_wq)
315 return -ENOMEM;
316 return 0;
317 }
318
/*
 * Tear down the share table at unmount time: release every remaining
 * item and destroy the timeout workqueue.
 *
 * NOTE(review): flush_delayed_work() can sleep, yet it is called here
 * while holding item_list_lock (a spinlock) — confirm this cannot
 * trigger a sleeping-in-atomic warning, or restructure to drain the
 * work items before taking the lock.
 */
void hmdfs_clear_share_table(struct hmdfs_sb_info *sbi)
{
	struct hmdfs_share_table *st = &sbi->share_table;
	struct hmdfs_share_item *item, *tmp;

	spin_lock(&sbi->share_table.item_list_lock);
	list_for_each_entry_safe(item, tmp, &sbi->share_table.item_list_head,
				 list) {
		/* Let a pending timeout finish so d_work is idle before free. */
		flush_delayed_work(&item->d_work);
		remove_and_release_share_item(item);
	}
	spin_unlock(&sbi->share_table.item_list_lock);

	destroy_workqueue(st->share_item_timeout_wq);
}
334
hmdfs_clear_first_item(struct hmdfs_share_table * st)335 int hmdfs_clear_first_item(struct hmdfs_share_table *st)
336 {
337 int ret = -EMFILE;
338 struct hmdfs_share_item *item, *tmp;
339 list_for_each_entry_safe(item, tmp, &st->item_list_head, list) {
340 if (!item->timeout) {
341 cancel_delayed_work_sync(&item->d_work);
342 }
343 remove_and_release_share_item(item);
344 ret = 0;
345 break;
346 }
347 return ret;
348 }
349