1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/hmdfs/hmdfs_server.c
4 *
5 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
6 */
7
8 #include "hmdfs_server.h"
9
10 #include <linux/file.h>
11 #include <linux/xattr.h>
12 #include <linux/namei.h>
13 #include <linux/statfs.h>
14 #include <linux/mount.h>
15
16 #include "authority/authentication.h"
17 #include "hmdfs.h"
18 #include "hmdfs_dentryfile.h"
19 #include "hmdfs_trace.h"
20 #include "server_writeback.h"
21 #include "comm/node_cb.h"
22
23 #define HMDFS_MAX_HIDDEN_DIR 1
24
25 struct hmdfs_open_info {
26 struct file *file;
27 struct inode *inode;
28 bool stat_valid;
29 struct kstat stat;
30 uint64_t real_ino;
31 int file_id;
32 };
33
34 static int insert_file_into_conn(struct hmdfs_peer *conn, struct file *file)
35 {
36 struct idr *idr = &(conn->file_id_idr);
37 int ret;
38
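/*
 * Preload the idr so the allocation under the spinlock can use GFP_NOWAIT;
 * idr_alloc_cyclic hands out increasing ids, which reduces the chance of
 * immediately reusing a just-released file_id.
 */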
39 idr_preload(GFP_KERNEL);
40 spin_lock(&(conn->file_id_lock));
41 ret = idr_alloc_cyclic(idr, file, 0, 0, GFP_NOWAIT);
42 spin_unlock(&(conn->file_id_lock));
43 idr_preload_end();
44 return ret;
45 }
46
47 /*
48 * get_file_from_conn - get the file from conn by file_id. Note that an
49 * additional reference is acquired for the returned file; the caller should
50 * put it once the file is no longer used.
51 */
52 static struct file *get_file_from_conn(struct hmdfs_peer *conn, __u32 file_id)
53 {
54 struct file *file;
55 struct idr *idr = &(conn->file_id_idr);
56
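/*
 * Look the file up under RCU; get_file_rcu() only takes a reference if the
 * file's refcount is non-zero, so a file racing with its final fput is
 * treated as not found.
 */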
57 rcu_read_lock();
58 file = idr_find(idr, file_id);
59 if (file && !get_file_rcu(file))
60 file = NULL;
61 rcu_read_unlock();
62 return file;
63 }
64
65 void remove_file_from_conn(struct hmdfs_peer *conn, __u32 file_id)
66 {
67 spinlock_t *lock = &(conn->file_id_lock);
68 struct idr *idr = &(conn->file_id_idr);
69
70 spin_lock(lock);
71 idr_remove(idr, file_id);
72 spin_unlock(lock);
73 }
74
75 struct file *hmdfs_open_path(struct hmdfs_sb_info *sbi, const char *path)
76 {
77 struct path root_path;
78 struct file *file;
79 int err;
80 const char *root_name = sbi->local_dst;
81
82 err = kern_path(root_name, 0, &root_path);
83 if (err) {
84 hmdfs_info("kern_path failed: %d", err);
85 return ERR_PTR(err);
86 }
87 file = file_open_root(&root_path, path,
88 O_RDWR | O_LARGEFILE, 0644);
89 path_put(&root_path);
90 if (IS_ERR(file)) {
91 hmdfs_err(
92 "GRAPERR sb->s_readonly_remount %d sb_flag %lu",
93 sbi->sb->s_readonly_remount, sbi->sb->s_flags);
94 hmdfs_info("file_open_root failed: %ld", PTR_ERR(file));
95 } else {
96 hmdfs_info("get file with magic %lu",
97 file->f_inode->i_sb->s_magic);
98 }
99 return file;
100 }
101
102 inline bool is_dst_device(char *src_cid, char *dst_cid)
103 {
104 return strncmp(src_cid, dst_cid, HMDFS_CID_SIZE) == 0 ? true : false;
105 }
106
107 void hmdfs_clear_share_item_offline(struct hmdfs_peer *conn)
108 {
109 struct hmdfs_sb_info *sbi = conn->sbi;
110 struct hmdfs_share_item *item, *tmp;
111
112 spin_lock(&sbi->share_table.item_list_lock);
113 list_for_each_entry_safe(item, tmp, &sbi->share_table.item_list_head,
114 list) {
115 if (is_dst_device(item->cid, conn->cid)) {
116 list_del(&item->list);
117 release_share_item(item);
118 sbi->share_table.item_cnt--;
119 }
120 }
121 spin_unlock(&sbi->share_table.item_list_lock);
122 }
123
124 inline void hmdfs_close_path(struct file *file)
125 {
126 fput(file);
127 }
128
129 /* After the peer goes offline, the server closes all files opened by that client */
130 void hmdfs_server_offline_notify(struct hmdfs_peer *conn, int evt,
131 unsigned int seq)
132 {
133 int id;
134 int count = 0;
135 unsigned int next;
136 struct file *filp = NULL;
137 struct idr *idr = &conn->file_id_idr;
138
139 /* wait for all async work to complete */
140 flush_workqueue(conn->req_handle_wq);
141 flush_workqueue(conn->async_wq);
142
143 /* If some open requests are still being processed,
144 * their files may need to be closed now that the peer is offline.
145 */
146 idr_for_each_entry(idr, filp, id) {
147 hmdfs_debug("[%d]Server close: id=%d", count, id);
148 hmdfs_close_path(filp);
149 count++;
150 if (count % HMDFS_IDR_RESCHED_COUNT == 0)
151 cond_resched();
152 }
153
154 hmdfs_clear_share_item_offline(conn);
155
156 /* Reinitialize idr */
157 next = idr_get_cursor(idr);
158 idr_destroy(idr);
159
160 idr_init(idr);
161 idr_set_cursor(idr, next);
162
163 /* Make old file ids stale */
164 conn->fid_cookie++;
165 }
166
167 static struct hmdfs_node_cb_desc server_cb[] = {
168 {
169 .evt = NODE_EVT_OFFLINE,
170 .sync = true,
171 .min_version = DFS_2_0,
172 .fn = hmdfs_server_offline_notify
173 },
174 };
175
176 void __init hmdfs_server_add_node_evt_cb(void)
177 {
178 hmdfs_node_add_evt_cb(server_cb, ARRAY_SIZE(server_cb));
179 }
180
181 static const char *datasl_str[] = {
182 "s0", "s1", "s2", "s3", "s4"
183 };
184
185 static int parse_data_sec_level(const char *sl_value, size_t sl_value_len)
186 {
187 int i;
188
189 for (i = 0; i < sizeof(datasl_str) / sizeof(datasl_str[0]); i++) {
190 if (!strncmp(sl_value, datasl_str[i], strlen(datasl_str[i])))
191 return i + DATA_SEC_LEVEL0;
192 }
193
194 return DATA_SEC_LEVEL3;
195 }
196
197 static int check_sec_level(struct hmdfs_peer *node, const char *file_name)
198 {
199 int err;
200 int ret = 0;
201 struct path root_path;
202 struct path file_path;
203 char *value = NULL;
204 size_t value_len = DATA_SEC_LEVEL_LENGTH;
205
206 if (node->devsl <= 0) {
207 ret = -EACCES;
208 goto out_free;
209 }
210
211 value = kzalloc(value_len, GFP_KERNEL);
212 if (!value) {
213 ret = -ENOMEM;
214 goto out_free;
215 }
216
217 err = kern_path(node->sbi->local_dst, LOOKUP_DIRECTORY, &root_path);
218 if (err) {
219 hmdfs_err("get root path error");
220 ret = err;
221 goto out_free;
222 }
223
224 err = vfs_path_lookup(root_path.dentry, root_path.mnt, file_name, 0,
225 &file_path);
226 if (err) {
227 hmdfs_err("get file path error");
228 ret = err;
229 goto out_err;
230 }
231
232 err = vfs_getxattr(file_path.dentry, DATA_SEC_LEVEL_LABEL, value,
233 value_len);
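/*
 * Grant access if the file carries no security label and the peer is at
 * least DATA_SEC_LEVEL3, or if the peer's level is not lower than the level
 * recorded in the label; otherwise refuse with -EACCES.
 */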
234 if (err <= 0 && node->devsl >= DATA_SEC_LEVEL3)
235 goto out;
236 if (err > 0 && node->devsl >= parse_data_sec_level(value, err))
237 goto out;
238
239 ret = -EACCES;
240 out:
241 path_put(&file_path);
242 out_err:
243 path_put(&root_path);
244 out_free:
245 kfree(value);
246 return ret;
247 }
248
249 static int hmdfs_check_share_access_permission(struct hmdfs_sb_info *sbi,
250 const char *filename, char *cid, struct hmdfs_share_item **item)
251 {
252 struct qstr candidate = QSTR_INIT(filename, strlen(filename));
253 int ret = -ENOENT;
254
255 spin_lock(&sbi->share_table.item_list_lock);
256 *item = hmdfs_lookup_share_item(&sbi->share_table, &candidate);
257 if (*item && is_dst_device((*item)->cid, cid)) {
258 spin_unlock(&sbi->share_table.item_list_lock);
259 return 0;
260 } else
261 *item = NULL;
262 spin_unlock(&sbi->share_table.item_list_lock);
263
264 return ret;
265 }
266
267 static struct file *hmdfs_open_file(struct hmdfs_peer *con,
268 const char *filename, uint8_t file_type,
269 int *file_id)
270 {
271 struct file *file = NULL;
272 struct hmdfs_share_item *item = NULL;
273 int err = 0;
274 int id;
275
276 if (!filename) {
277 hmdfs_err("filename is NULL");
278 return ERR_PTR(-EINVAL);
279 }
280
281 if (check_sec_level(con, filename)) {
282 hmdfs_err("devsl permission denied");
283 return ERR_PTR(-EACCES);
284 }
285
286 if (hm_isshare(file_type)) {
287 err = hmdfs_check_share_access_permission(con->sbi,
288 filename, con->cid, &item);
289 if (err)
290 return ERR_PTR(err);
291 }
292 file = hmdfs_open_path(con->sbi, filename);
293
294 if (IS_ERR(file))
295 return file;
296
297 id = insert_file_into_conn(con, file);
298 if (id < 0) {
299 hmdfs_err("file_id alloc failed! err=%d", id);
300 hmdfs_close_path(file);
301 return ERR_PTR(id);
302 }
303 *file_id = id;
304
305 /* hold the item so it cannot time out while the file is open */
306 if (item)
307 kref_get(&item->ref);
308
309 return file;
310 }
311
312 static struct hmdfs_time_t msec_to_timespec(unsigned int msec)
313 {
314 struct hmdfs_time_t timespec = {
315 .tv_sec = msec / MSEC_PER_SEC,
316 .tv_nsec = (msec % MSEC_PER_SEC) * NSEC_PER_MSEC,
317 };
318
319 return timespec;
320 }
321
322 static struct hmdfs_time_t hmdfs_current_kernel_time(void)
323 {
324 struct hmdfs_time_t time;
325
326 #if KERNEL_VERSION(4, 18, 0) < LINUX_VERSION_CODE
327 ktime_get_coarse_real_ts64(&time);
328 #else
329 time = current_kernel_time();
330 #endif
331 return time;
332 }
333
334 /*
335 * Generate the fid version in the following format:
336 *
337 * |  boot cookie  |  con cookie  |
338 * |---------------|--------------|
339 *      49 bits        15 bits
340 */
341 static uint64_t hmdfs_server_pack_fid_ver(struct hmdfs_peer *con,
342 struct hmdfs_head_cmd *cmd)
343 {
344 uint64_t boot_cookie = con->sbi->boot_cookie;
345 uint16_t con_cookie = con->fid_cookie;
346
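/*
 * Merge the per-boot cookie (high bits) with the per-connection cookie
 * (low HMDFS_FID_VER_BOOT_COOKIE_SHIFT bits), so a file id goes stale after
 * either a server reboot or a reconnect that bumps fid_cookie.
 */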
347 return (boot_cookie |
348 (con_cookie & ((1 << HMDFS_FID_VER_BOOT_COOKIE_SHIFT) - 1)));
349 }
350
351 static struct file *get_file_by_fid_and_ver(struct hmdfs_peer *con,
352 struct hmdfs_head_cmd *cmd,
353 __u32 file_id, __u64 file_ver)
354 {
355 struct file *file = NULL;
356 __u64 cur_file_ver = hmdfs_server_pack_fid_ver(con, cmd);
357
358 if (file_ver != cur_file_ver) {
359 hmdfs_warning("Stale file version %llu for fid %u (ver %llu)",
360 file_ver, file_id, cur_file_ver);
361 return ERR_PTR(-EBADF);
362 }
363
364 file = get_file_from_conn(con, file_id);
365 if (!file)
366 return ERR_PTR(-EBADF);
367
368 return file;
369 }
370
371 static void hmdfs_update_open_response(struct hmdfs_peer *con,
372 struct hmdfs_head_cmd *cmd,
373 struct hmdfs_open_info *info,
374 struct open_response *resp)
375 {
376 struct hmdfs_time_t current_time = hmdfs_current_kernel_time();
377 struct hmdfs_time_t ctime = info->stat_valid ? info->stat.ctime :
378 info->inode->i_ctime;
379 struct hmdfs_time_t precision =
380 msec_to_timespec(con->sbi->dcache_precision);
381 loff_t size = info->stat_valid ? info->stat.size :
382 i_size_read(info->inode);
383
384 resp->ino = cpu_to_le64(info->real_ino);
385 resp->file_ver = cpu_to_le64(hmdfs_server_pack_fid_ver(con, cmd));
386 resp->file_id = cpu_to_le32(info->file_id);
387 resp->file_size = cpu_to_le64(size);
388 resp->ctime = cpu_to_le64(ctime.tv_sec);
389 resp->ctime_nsec = cpu_to_le32(ctime.tv_nsec);
390
391 /*
392 * On the server, ctime might stay the same after an overwrite. We
393 * introduce a new value, stable_ctime, to handle this:
394 * - if open rpc time < ctime, stable_ctime = 0;
395 * - if ctime <= open rpc time < ctime + dcache_precision,
396 *   stable_ctime = ctime;
397 * - else, stable_ctime = ctime + dcache_precision.
398 */
399 precision = hmdfs_time_add(ctime, precision);
400 if (hmdfs_time_compare(&current_time, &ctime) < 0) {
401 resp->stable_ctime = cpu_to_le64(0);
402 resp->stable_ctime_nsec = cpu_to_le32(0);
403 } else if (hmdfs_time_compare(&current_time, &ctime) >= 0 &&
404 hmdfs_time_compare(&current_time, &precision) < 0) {
405 resp->stable_ctime = resp->ctime;
406 resp->stable_ctime_nsec = resp->ctime_nsec;
407 } else {
408 resp->stable_ctime = cpu_to_le64(precision.tv_sec);
409 resp->stable_ctime_nsec = cpu_to_le32(precision.tv_nsec);
410 }
411 }
412
413 static int hmdfs_get_open_info(struct hmdfs_peer *con, uint8_t file_type,
414 const char *filename,
415 struct hmdfs_open_info *info)
416 {
417 int ret = 0;
418
419 info->inode = file_inode(info->file);
420 info->stat_valid = false;
421 if (con->sbi->sb == info->inode->i_sb) {
422 /* the opened inode belongs to hmdfs itself, so switch to its lower inode */
423 info->inode = hmdfs_i(info->inode)->lower_inode;
424 } else if (con->sbi->lower_sb != info->inode->i_sb) {
425 /* It's possible that inode is not from lower, for example:
426 * 1. touch /f2fs/file
427 * 2. ln -s /sdcard_fs/file /f2fs/link
428 * 3. cat /hmdfs/link -> generate dentry cache in sdcard_fs
429 * 4. echo hi >> /hmdfs/file -> append write not through
430 * sdcard_fs
431 * 5. cat /hmdfs/link -> gets the inode in sdcard_fs, whose size
432 * is still 0
433 *
434 * If src file isn't in lower, use getattr to get
435 * information.
436 */
437 ret = vfs_getattr(&info->file->f_path, &info->stat, STATX_BASIC_STATS | STATX_BTIME,
438 0);
439 if (ret) {
440 hmdfs_err("call vfs_getattr failed, err %d", ret);
441 return ret;
442 }
443 info->stat_valid = true;
444 }
445
446 info->real_ino = generate_u64_ino(info->inode->i_ino,
447 info->inode->i_generation);
448
449 return 0;
450 }
451
452 void hmdfs_server_open(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
453 void *data)
454 {
455 struct open_request *recv = data;
456 int sizeread = sizeof(struct open_response);
457 struct open_response *resp = NULL;
458 struct hmdfs_open_info *info = NULL;
459 int ret = 0;
460
461 trace_hmdfs_server_open_enter(con, recv);
462
463 resp = kzalloc(sizeread, GFP_KERNEL);
464 info = kmalloc(sizeof(*info), GFP_KERNEL);
465 if (!resp || !info) {
466 ret = -ENOMEM;
467 goto err_free;
468 }
469
470 info->file = hmdfs_open_file(con, recv->buf, recv->file_type,
471 &info->file_id);
472 if (IS_ERR(info->file)) {
473 ret = PTR_ERR(info->file);
474 goto err_free;
475 }
476
477 ret = hmdfs_get_open_info(con, recv->file_type, recv->buf, info);
478 if (ret)
479 goto err_close;
480
481 hmdfs_update_open_response(con, cmd, info, resp);
482
483 trace_hmdfs_server_open_exit(con, resp, info->file, 0);
484 ret = hmdfs_sendmessage_response(con, cmd, sizeread, resp, 0);
485 if (ret) {
486 hmdfs_err("sending msg response failed, file_id %d, err %d",
487 info->file_id, ret);
488 remove_file_from_conn(con, info->file_id);
489 hmdfs_close_path(info->file);
490 }
491 kfree(resp);
492 kfree(info);
493 return;
494
495 err_close:
496 remove_file_from_conn(con, info->file_id);
497 hmdfs_close_path(info->file);
498 err_free:
499 kfree(resp);
500 kfree(info);
501 trace_hmdfs_server_open_exit(con, NULL, NULL, ret);
502 hmdfs_send_err_response(con, cmd, ret);
503 }
504
505 static int hmdfs_check_and_create(struct path *path_parent,
506 struct dentry *dentry, uint64_t device_id,
507 umode_t mode, bool is_excl)
508 {
509 int err = 0;
510
511 /* if inode doesn't exist, create it */
512 if (d_is_negative(dentry)) {
513 hmdfs_mark_drop_flag(device_id, path_parent->dentry);
514 err = vfs_create(d_inode(path_parent->dentry), dentry, mode,
515 is_excl);
516 if (err)
517 hmdfs_err("create failed, err %d", err);
518 } else {
519 if (is_excl)
520 err = -EEXIST;
521 else if (S_ISLNK(d_inode(dentry)->i_mode))
522 err = -EINVAL;
523 else if (S_ISDIR(d_inode(dentry)->i_mode))
524 err = -EISDIR;
525 }
526
527 return err;
528 }
529 static int hmdfs_lookup_create(struct hmdfs_peer *con,
530 struct atomic_open_request *recv,
531 struct path *child_path, bool *truncate)
532 {
533 int err = 0;
534 struct path path_root;
535 struct path path_parent;
536 uint32_t open_flags = le32_to_cpu(recv->open_flags);
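/*
 * recv->buf carries the parent directory path and the new file name as two
 * consecutive NUL-terminated strings.
 */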
537 char *path = recv->buf;
538 char *filename = recv->buf + le32_to_cpu(recv->path_len) + 1;
539 struct dentry *dentry = NULL;
540
541 err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &path_root);
542 if (err) {
543 hmdfs_err("no path for %s, err %d", con->sbi->local_dst, err);
544 return err;
545 }
546
547 err = vfs_path_lookup(path_root.dentry, path_root.mnt, path,
548 LOOKUP_DIRECTORY, &path_parent);
549 if (err) {
550 hmdfs_info("no dir in %s, err %d", con->sbi->local_dst, err);
551 goto put_path_root;
552 }
553
554 inode_lock(d_inode(path_parent.dentry));
555 dentry = lookup_one_len(filename, path_parent.dentry, strlen(filename));
556 if (IS_ERR(dentry)) {
557 err = PTR_ERR(dentry);
558 inode_unlock(d_inode(path_parent.dentry));
559 goto put_path_parent;
560 }
561 /* only truncate if inode already exists */
562 *truncate = ((open_flags & HMDFS_O_TRUNC) && d_is_positive(dentry));
563 err = hmdfs_check_and_create(&path_parent, dentry, con->device_id,
564 le16_to_cpu(recv->mode),
565 open_flags & HMDFS_O_EXCL);
566 inode_unlock(d_inode(path_parent.dentry));
567 if (err) {
568 dput(dentry);
569 } else {
570 child_path->dentry = dentry;
571 child_path->mnt = mntget(path_parent.mnt);
572 }
573
574 put_path_parent:
575 path_put(&path_parent);
576 put_path_root:
577 path_put(&path_root);
578 return err;
579 }
580
581 static int hmdfs_dentry_open(struct hmdfs_peer *con,
582 const struct path *path,
583 struct hmdfs_open_info *info)
584 {
585 int err = 0;
586
587 info->file = dentry_open(path, O_RDWR | O_LARGEFILE, current_cred());
588 if (IS_ERR(info->file)) {
589 err = PTR_ERR(info->file);
590 hmdfs_err("open file failed, err %d", err);
591 return err;
592 }
593
594 info->file_id = insert_file_into_conn(con, info->file);
595 if (info->file_id < 0) {
596 err = info->file_id;
597 hmdfs_err("file_id alloc failed! err %d", err);
598 hmdfs_close_path(info->file);
599 return err;
600 }
601
602 return 0;
603 }
604
605 static int hmdfs_server_do_atomic_open(struct hmdfs_peer *con,
606 struct hmdfs_head_cmd *cmd,
607 struct atomic_open_request *recv,
608 struct hmdfs_open_info *info,
609 struct atomic_open_response *resp)
610 {
611 struct path child_path;
612 bool truncate = false;
613 int err = 0;
614
615 err = hmdfs_lookup_create(con, recv, &child_path, &truncate);
616 if (err)
617 return err;
618
619 err = hmdfs_dentry_open(con, &child_path, info);
620 if (err)
621 goto put_child;
622
623 err = hmdfs_get_open_info(con, HM_REG, NULL, info);
624 if (err)
625 goto fail_close;
626
627 if (truncate) {
628 err = vfs_truncate(&child_path, 0);
629 if (err) {
630 hmdfs_err("truncate failed, err %d", err);
631 goto fail_close;
632 }
633 }
634 hmdfs_update_open_response(con, cmd, info, &resp->open_resp);
635 resp->i_mode = cpu_to_le16(file_inode(info->file)->i_mode);
636
637 fail_close:
638 if (err) {
639 remove_file_from_conn(con, info->file_id);
640 hmdfs_close_path(info->file);
641 }
642 put_child:
643 path_put(&child_path);
644 return err;
645 }
646
647 void hmdfs_server_atomic_open(struct hmdfs_peer *con,
648 struct hmdfs_head_cmd *cmd, void *data)
649 {
650 int err;
651 struct atomic_open_request *recv = data;
652 struct atomic_open_response *resp = NULL;
653 struct hmdfs_open_info *info = NULL;
654
655 info = kmalloc(sizeof(*info), GFP_KERNEL);
656 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
657 if (!resp || !info) {
658 err = -ENOMEM;
659 goto out;
660 }
661
662 err = hmdfs_server_do_atomic_open(con, cmd, recv, info, resp);
663
664 out:
665 if (err) {
666 hmdfs_send_err_response(con, cmd, err);
667 } else {
668 err = hmdfs_sendmessage_response(con, cmd, sizeof(*resp), resp,
669 0);
670 if (err) {
671 hmdfs_err("sending msg response failed, file_id %d, err %d",
672 info->file_id, err);
673 remove_file_from_conn(con, info->file_id);
674 hmdfs_close_path(info->file);
675 }
676 }
677 kfree(info);
678 kfree(resp);
679 }
680
681 void hmdfs_close_share_item(struct hmdfs_sb_info *sbi, struct file *file,
682 char *cid)
683 {
684 struct qstr relativepath;
685 const char *path_name;
686 struct hmdfs_share_item *item = NULL;
687
688 path_name = hmdfs_get_dentry_relative_path(file->f_path.dentry);
689 if (unlikely(!path_name)) {
690 hmdfs_err("get dentry relative path error");
691 return;
692 }
693
694 relativepath.name = path_name;
695 relativepath.len = strlen(path_name);
696
697 item = hmdfs_lookup_share_item(&sbi->share_table, &relativepath);
698
699 if (item) {
700 if (unlikely(!is_dst_device(item->cid, cid))) {
701 hmdfs_err("item not right");
702 goto err_out;
703 }
704
705 if (unlikely(kref_read(&item->ref) == 1))
706 hmdfs_err("item ref error");
707
708 set_item_timeout(item);
709 kref_put(&item->ref, hmdfs_remove_share_item);
710 } else
711 hmdfs_err("cannot get share item %s", relativepath.name);
712
713 err_out:
714 kfree(path_name);
715 }
716
717 void hmdfs_server_release(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
718 void *data)
719 {
720 struct release_request *release_recv = data;
721 struct file *file = NULL;
722 __u32 file_id;
723 __u64 file_ver;
724 int ret = 0;
725
726 file_id = le32_to_cpu(release_recv->file_id);
727 file_ver = le64_to_cpu(release_recv->file_ver);
728 file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver);
729 if (IS_ERR(file)) {
730 hmdfs_err("cannot find %u", file_id);
731 ret = PTR_ERR(file);
732 goto out;
733 }
734
735 if (hmdfs_is_share_file(file))
736 hmdfs_close_share_item(con->sbi, file, con->cid);
737
738 /* put the reference acquired by get_file_by_fid_and_ver() */
739 hmdfs_close_path(file);
740 hmdfs_info("close %u", file_id);
741 remove_file_from_conn(con, file_id);
742
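/* now drop the reference that was installed in the idr when the file was opened */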
743 hmdfs_close_path(file);
744
745 out:
746 trace_hmdfs_server_release(con, file_id, file_ver, ret);
747 set_conn_sock_quickack(con);
748 }
749
750 void hmdfs_server_fsync(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
751 void *data)
752 {
753 struct fsync_request *fsync_recv = data;
754 __s32 datasync = le32_to_cpu(fsync_recv->datasync);
755 __s64 start = le64_to_cpu(fsync_recv->start);
756 __s64 end = le64_to_cpu(fsync_recv->end);
757 struct file *file = NULL;
758 __u32 file_id;
759 __u64 file_ver;
760 int ret = 0;
761
762 file_id = le32_to_cpu(fsync_recv->file_id);
763 file_ver = le64_to_cpu(fsync_recv->file_ver);
764 file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver);
765 if (IS_ERR(file)) {
766 hmdfs_err("cannot find %u", file_id);
767 ret = PTR_ERR(file);
768 goto out;
769 }
770
771 ret = vfs_fsync_range(file, start, end, datasync);
772 if (ret)
773 hmdfs_err("fsync fail, ret %d", ret);
774
775 hmdfs_close_path(file);
776 out:
777 hmdfs_send_err_response(con, cmd, ret);
778 }
779
780 void hmdfs_server_readpage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
781 void *data)
782 {
783 struct readpage_request *readpage_recv = data;
784 __u64 file_ver;
785 __u32 file_id;
786 struct file *file = NULL;
787 loff_t pos;
788 struct readpage_response *readpage = NULL;
789 int ret = 0;
790 size_t read_len;
791
792 file_id = le32_to_cpu(readpage_recv->file_id);
793 file_ver = le64_to_cpu(readpage_recv->file_ver);
794 file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver);
795 if (IS_ERR(file)) {
796 hmdfs_info(
797 "file with id %u does not exist, pgindex %llu, devid %llu",
798 file_id, le64_to_cpu(readpage_recv->index),
799 con->device_id);
800 ret = PTR_ERR(file);
801 goto fail;
802 }
803
804 read_len = (size_t)le32_to_cpu(readpage_recv->size);
805 if (read_len == 0)
806 goto fail_put_file;
807
808 readpage = kmalloc(read_len, GFP_KERNEL);
809 if (!readpage) {
810 ret = -ENOMEM;
811 goto fail_put_file;
812 }
813
814 pos = (loff_t)le64_to_cpu(readpage_recv->index) << HMDFS_PAGE_OFFSET;
815 ret = kernel_read(file, readpage->buf, read_len, &pos);
816 if (ret < 0) {
817 hmdfs_send_err_response(con, cmd, -EIO);
818 } else {
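/* zero-fill a short read so the client always receives a full-sized buffer */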
819 if (ret != read_len)
820 memset(readpage->buf + ret, 0, read_len - ret);
821 hmdfs_sendmessage_response(con, cmd, read_len, readpage, 0);
822 }
823
824 hmdfs_close_path(file);
825 kfree(readpage);
826 return;
827
828 fail_put_file:
829 hmdfs_close_path(file);
830 fail:
831 hmdfs_send_err_response(con, cmd, ret);
832 }
833
834 static struct readpages_response *alloc_readpages_resp(unsigned int len)
835 {
836 struct readpages_response *resp = NULL;
837
838 if (len > HMDFS_PAGE_SIZE)
839 resp = vmalloc(len);
840 else
841 resp = kmalloc(len, GFP_KERNEL);
842
843 return resp;
844 }
845
846 static void free_readpages_resp(struct readpages_response *resp,
847 unsigned int len)
848 {
849 if (len > HMDFS_PAGE_SIZE)
850 vfree(resp);
851 else
852 kfree(resp);
853 }
854
855 void hmdfs_server_readpages(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
856 void *data)
857 {
858 struct readpages_request *req = data;
859 __u64 file_ver;
860 __u32 file_id;
861 struct file *file = NULL;
862 loff_t pos;
863 struct readpages_response *resp = NULL;
864 ssize_t ret = 0;
865 size_t read_len;
866
867 file_id = le32_to_cpu(req->file_id);
868 file_ver = le64_to_cpu(req->file_ver);
869 file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver);
870 if (IS_ERR(file)) {
871 ret = PTR_ERR(file);
872 goto fail;
873 }
874
875 read_len = (size_t)le32_to_cpu(req->size);
876 if (read_len == 0)
877 goto fail_put_file;
878
879 resp = alloc_readpages_resp(read_len);
880 if (!resp) {
881 ret = -ENOMEM;
882 goto fail_put_file;
883 }
884
885 pos = (loff_t)le64_to_cpu(req->index) << HMDFS_PAGE_OFFSET;
886 ret = kernel_read(file, resp->buf, read_len, &pos);
887 if (ret < 0) {
888 ret = -EIO;
889 goto fail_free_resp;
890 }
891
892 hmdfs_sendmessage_response(con, cmd, ret, resp, 0);
893 hmdfs_close_path(file);
894 free_readpages_resp(resp, read_len);
895 return;
896
897 fail_free_resp:
898 free_readpages_resp(resp, read_len);
899 fail_put_file:
900 hmdfs_close_path(file);
901 fail:
902 hmdfs_send_err_response(con, cmd, ret);
903 }
904
905 static int hmdfs_do_readpages_open(struct hmdfs_peer *con,
906 struct hmdfs_head_cmd *cmd,
907 struct readpages_open_request *recv,
908 struct hmdfs_open_info *info,
909 struct readpages_open_response *resp)
910 {
911 int ret = 0;
912 loff_t pos = 0;
913
914 info->file = hmdfs_open_file(con, recv->buf, recv->file_type,
915 &info->file_id);
916 if (IS_ERR(info->file))
917 return PTR_ERR(info->file);
918
919 ret = hmdfs_get_open_info(con, recv->file_type, recv->buf, info);
920 if (ret)
921 goto fail_close;
922
923 pos = (loff_t)le64_to_cpu(recv->index) << HMDFS_PAGE_OFFSET;
924 ret = kernel_read(info->file, resp->buf, le32_to_cpu(recv->size), &pos);
925 if (ret < 0)
926 goto fail_close;
927
928 hmdfs_update_open_response(con, cmd, info, &resp->open_resp);
929 memset(resp->reserved, 0, sizeof(resp->reserved));
930 ret = hmdfs_sendmessage_response(con, cmd, sizeof(*resp) + ret, resp,
931 0);
932 if (ret) {
933 hmdfs_err("sending msg response failed, file_id %d, err %d",
934 info->file_id, ret);
935 ret = 0;
936 goto fail_close;
937 }
938 return 0;
939
940 fail_close:
941 remove_file_from_conn(con, info->file_id);
942 hmdfs_close_path(info->file);
943 return ret;
944 }
945
946 void hmdfs_server_readpages_open(struct hmdfs_peer *con,
947 struct hmdfs_head_cmd *cmd, void *data)
948 {
949 struct readpages_open_request *recv = data;
950 struct readpages_open_response *resp = NULL;
951 int ret = -EINVAL;
952 size_t read_len = 0;
953 size_t resp_len = 0;
954 struct hmdfs_open_info *info = NULL;
955
956 info = kmalloc(sizeof(*info), GFP_KERNEL);
957 if (!info) {
958 ret = -ENOMEM;
959 goto fail;
960 }
961
962 read_len = (size_t)le32_to_cpu(recv->size);
963 if (read_len == 0) {
964 ret = -EINVAL;
965 goto fail_free_info;
966 }
967 resp_len = read_len + sizeof(*resp);
968 resp = vmalloc(resp_len);
969 if (!resp) {
970 ret = -ENOMEM;
971 goto fail_free_info;
972 }
973
974 ret = hmdfs_do_readpages_open(con, cmd, recv, info, resp);
975
976 vfree(resp);
977 fail_free_info:
978 kfree(info);
979 fail:
980 if (ret)
981 hmdfs_send_err_response(con, cmd, ret);
982 }
983
984 static bool need_rebuild_dcache(struct hmdfs_dcache_header *h,
985 struct hmdfs_time_t time,
986 unsigned int precision)
987 {
988 struct hmdfs_time_t crtime = { .tv_sec = le64_to_cpu(h->dcache_crtime),
989 .tv_nsec = le64_to_cpu(
990 h->dcache_crtime_nsec) };
991 struct hmdfs_time_t ctime = { .tv_sec = le64_to_cpu(h->dentry_ctime),
992 .tv_nsec = le64_to_cpu(
993 h->dentry_ctime_nsec) };
994 struct hmdfs_time_t pre_time = { .tv_sec = precision / MSEC_PER_SEC,
995 .tv_nsec = precision % MSEC_PER_SEC *
996 NSEC_PER_MSEC };
997
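/*
 * Rebuild if the directory ctime no longer matches the one recorded in the
 * cache header, or if the cache was created within 'precision' of that
 * ctime (a later change might not have bumped ctime yet).
 */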
998 if (hmdfs_time_compare(&time, &ctime) != 0)
999 return true;
1000
1001 pre_time = hmdfs_time_add(time, pre_time);
1002 if (hmdfs_time_compare(&crtime, &pre_time) < 0)
1003 return true;
1004
1005 return false;
1006 }
1007
1008 static bool hmdfs_server_cache_validate(struct file *filp, struct inode *inode,
1009 unsigned long precision)
1010 {
1011 struct hmdfs_dcache_header header;
1012 int overallpage;
1013 ssize_t bytes;
1014 loff_t pos = 0;
1015
1016 overallpage = get_dentry_group_cnt(file_inode(filp));
1017 if (overallpage == 0) {
1018 hmdfs_err("cache file size is 0");
1019 return false;
1020 }
1021
1022 bytes = kernel_read(filp, &header, sizeof(header), &pos);
1023 if (bytes != sizeof(header)) {
1024 hmdfs_err("read file failed, err:%zd", bytes);
1025 return false;
1026 }
1027
1028 return !need_rebuild_dcache(&header, inode->i_ctime, precision);
1029 }
1030
1031 struct file *hmdfs_server_cache_revalidate(struct hmdfs_sb_info *sbi,
1032 const char *recvpath,
1033 struct path *path)
1034 {
1035 struct cache_file_node *cfn = NULL;
1036 struct file *file;
1037
1038 cfn = find_cfn(sbi, HMDFS_SERVER_CID, recvpath, true);
1039 if (!cfn)
1040 return NULL;
1041
1042 if (!hmdfs_server_cache_validate(cfn->filp, path->dentry->d_inode,
1043 sbi->dcache_precision)) {
1044 remove_cfn(cfn);
1045 release_cfn(cfn);
1046 return NULL;
1047 }
1048 file = cfn->filp;
1049 get_file(cfn->filp);
1050 release_cfn(cfn);
1051
1052 return file;
1053 }
1054
1055 bool hmdfs_client_cache_validate(struct hmdfs_sb_info *sbi,
1056 struct readdir_request *readdir_recv,
1057 struct path *path)
1058 {
1059 struct inode *inode = path->dentry->d_inode;
1060 struct hmdfs_dcache_header header;
1061
1062 /* always rebuild dentryfile for small dir */
1063 if (le64_to_cpu(readdir_recv->num) < sbi->dcache_threshold)
1064 return false;
1065
1066 header.dcache_crtime = readdir_recv->dcache_crtime;
1067 header.dcache_crtime_nsec = readdir_recv->dcache_crtime_nsec;
1068 header.dentry_ctime = readdir_recv->dentry_ctime;
1069 header.dentry_ctime_nsec = readdir_recv->dentry_ctime_nsec;
1070
1071 return !need_rebuild_dcache(&header, inode->i_ctime,
1072 sbi->dcache_precision);
1073 }
1074
1075 static char *server_lower_dentry_path_raw(struct hmdfs_peer *peer,
1076 struct dentry *lo_d)
1077 {
1078 struct hmdfs_dentry_info *di = hmdfs_d(peer->sbi->sb->s_root);
1079 struct dentry *lo_d_root = di->lower_path.dentry;
1080 struct dentry *lo_d_tmp = NULL;
1081 char *lo_p_buf = NULL;
1082 char *buf_head = NULL;
1083 char *buf_tail = NULL;
1084 size_t path_len = 0;
1085
1086 lo_p_buf = kzalloc(PATH_MAX, GFP_KERNEL);
1087 if (unlikely(!lo_p_buf))
1088 return ERR_PTR(-ENOMEM);
1089
1090 /* Build the path string in reverse order first */
1091 for (lo_d_tmp = lo_d; lo_d_tmp != lo_d_root && !IS_ROOT(lo_d_tmp);
1092 lo_d_tmp = lo_d_tmp->d_parent) {
1093 u32 dlen = lo_d_tmp->d_name.len;
1094 int reverse_index = dlen - 1;
1095
1096 /* Considering the appended slash and '\0' */
1097 if (unlikely(path_len + dlen + 1 > PATH_MAX - 1)) {
1098 kfree(lo_p_buf);
1099 return ERR_PTR(-ENAMETOOLONG);
1100 }
1101 for (; reverse_index >= 0; --reverse_index)
1102 lo_p_buf[path_len++] =
1103 lo_d_tmp->d_name.name[reverse_index];
1104 lo_p_buf[path_len++] = '/';
1105 }
1106
1107 /* Reverse the reversed path str to get the real path str */
1108 for (buf_head = lo_p_buf, buf_tail = lo_p_buf + path_len - 1;
1109 buf_head < buf_tail; ++buf_head, --buf_tail)
1110 swap(*buf_head, *buf_tail);
1111
1112 if (path_len == 0)
1113 lo_p_buf[0] = '/';
1114 return lo_p_buf;
1115 }
1116
1117 static int server_lookup(struct hmdfs_peer *peer, const char *req_path,
1118 struct path *path)
1119 {
1120 struct path root_path;
1121 int err = 0;
1122
1123 err = kern_path(peer->sbi->local_dst, 0, &root_path);
1124 if (err)
1125 goto out_noroot;
1126
1127 err = vfs_path_lookup(root_path.dentry, root_path.mnt, req_path,
1128 LOOKUP_DIRECTORY, path);
1129 path_put(&root_path);
1130 out_noroot:
1131 return err;
1132 }
1133
1134 /**
1135 * server_lookup_lower - look up the lower file-system
1136 * @peer: target device node
1137 * @req_path: absolute path (with the mount point as root) from the request
1138 * @lo_p: the lower path to return
1139 *
1140 * Returns the lower path's name, with character case matched.
1141 */
1142 static char *server_lookup_lower(struct hmdfs_peer *peer, const char *req_path,
1143 struct path *lo_p)
1144 {
1145 char *lo_p_name = ERR_PTR(-ENOENT);
1146 struct path up_p;
1147 int err = 0;
1148
1149 err = server_lookup(peer, req_path, &up_p);
1150 if (err)
1151 goto out;
1152
1153 hmdfs_get_lower_path(up_p.dentry, lo_p);
1154 path_put(&up_p);
1155
1156 lo_p_name = server_lower_dentry_path_raw(peer, lo_p->dentry);
1157 if (IS_ERR(lo_p_name)) {
1158 err = PTR_ERR(lo_p_name);
1159 path_put(lo_p);
1160 }
1161 out:
1162 return err ? ERR_PTR(err) : lo_p_name;
1163 }
1164
1165 void hmdfs_server_readdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1166 void *data)
1167 {
1168 struct readdir_request *readdir_recv = data;
1169 struct path lo_p;
1170 struct file *filp = NULL;
1171 int err = 0;
1172 unsigned long long num = 0;
1173 char *lo_p_name = NULL;
1174
1175 trace_hmdfs_server_readdir(readdir_recv);
1176
1177 lo_p_name = server_lookup_lower(con, readdir_recv->path, &lo_p);
1178 if (IS_ERR(lo_p_name)) {
1179 err = PTR_ERR(lo_p_name);
1180 hmdfs_info("Failed to get lower path: %d", err);
1181 goto send_err;
1182 }
1183
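/*
 * The request carries the crtime/ctime of the client's cached dentry file;
 * if they are still consistent with this directory, skip rebuilding the
 * dentry file here.
 */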
1184 if (le32_to_cpu(readdir_recv->verify_cache)) {
1185 if (hmdfs_client_cache_validate(con->sbi, readdir_recv, &lo_p))
1186 goto out_response;
1187 }
1188
1189 filp = hmdfs_server_cache_revalidate(con->sbi, lo_p_name, &lo_p);
1190 if (IS_ERR_OR_NULL(filp)) {
1191 filp = hmdfs_server_rebuild_dents(con->sbi, &lo_p, &num,
1192 lo_p_name);
1193 if (IS_ERR_OR_NULL(filp)) {
1194 err = PTR_ERR(filp);
1195 goto err_lookup_path;
1196 }
1197 }
1198
1199 out_response:
1200 err = hmdfs_readfile_response(con, cmd, filp);
1201 if (!err)
1202 hmdfs_add_remote_cache_list(con, lo_p_name);
1203 if (num >= con->sbi->dcache_threshold)
1204 cache_file_persistent(con, filp, lo_p_name, true);
1205 if (filp)
1206 fput(filp);
1207 err_lookup_path:
1208 path_put(&lo_p);
1209 kfree(lo_p_name);
1210 send_err:
1211 if (err)
1212 hmdfs_send_err_response(con, cmd, err);
1213 }
1214
1215 void hmdfs_server_mkdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1216 void *data)
1217 {
1218 int err = 0;
1219 struct mkdir_request *mkdir_recv = data;
1220 struct inode *child_inode = NULL;
1221 struct dentry *dent = NULL;
1222 char *mkdir_dir = NULL;
1223 char *mkdir_name = NULL;
1224 struct hmdfs_inodeinfo_response *mkdir_resp = NULL;
1225 int respsize = sizeof(struct hmdfs_inodeinfo_response);
1226 int path_len = le32_to_cpu(mkdir_recv->path_len);
1227
1228 mkdir_resp = kzalloc(respsize, GFP_KERNEL);
1229 if (!mkdir_resp) {
1230 err = -ENOMEM;
1231 goto mkdir_out;
1232 }
1233
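/* mkdir_recv->path holds the parent path and the new dir name as two NUL-terminated strings */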
1234 mkdir_dir = mkdir_recv->path;
1235 mkdir_name = mkdir_recv->path + path_len + 1;
1236
1237 dent = hmdfs_root_mkdir(con->device_id, con->sbi->local_dst,
1238 mkdir_dir, mkdir_name,
1239 le16_to_cpu(mkdir_recv->mode));
1240 if (IS_ERR(dent)) {
1241 err = PTR_ERR(dent);
1242 hmdfs_err("hmdfs_root_mkdir failed err = %d", err);
1243 goto mkdir_out;
1244 }
1245 child_inode = d_inode(dent);
1246 mkdir_resp->i_mode = cpu_to_le16(child_inode->i_mode);
1247 mkdir_resp->i_size = cpu_to_le64(child_inode->i_size);
1248 mkdir_resp->i_mtime = cpu_to_le64(child_inode->i_mtime.tv_sec);
1249 mkdir_resp->i_mtime_nsec = cpu_to_le32(child_inode->i_mtime.tv_nsec);
1250 mkdir_resp->i_ino = cpu_to_le64(child_inode->i_ino);
1251 dput(dent);
1252 mkdir_out:
1253 hmdfs_sendmessage_response(con, cmd, respsize, mkdir_resp, err);
1254 kfree(mkdir_resp);
1255 }
1256
1257 void hmdfs_server_create(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1258 void *data)
1259 {
1260 int err = 0;
1261 struct create_request *create_recv = data;
1262 struct inode *child_inode = NULL;
1263 struct dentry *dent = NULL;
1264 char *create_dir = NULL;
1265 char *create_name = NULL;
1266 struct hmdfs_inodeinfo_response *create_resp = NULL;
1267 int respsize = sizeof(struct hmdfs_inodeinfo_response);
1268 int path_len = le32_to_cpu(create_recv->path_len);
1269
1270 create_resp = kzalloc(respsize, GFP_KERNEL);
1271 if (!create_resp) {
1272 err = -ENOMEM;
1273 goto create_out;
1274 }
1275
1276 create_dir = create_recv->path;
1277 create_name = create_recv->path + path_len + 1;
1278
1279 dent = hmdfs_root_create(con->device_id, con->sbi->local_dst,
1280 create_dir, create_name,
1281 le16_to_cpu(create_recv->mode),
1282 create_recv->want_excl);
1283 if (IS_ERR(dent)) {
1284 err = PTR_ERR(dent);
1285 hmdfs_err("hmdfs_root_create failed err = %d", err);
1286 goto create_out;
1287 }
1288 child_inode = d_inode(dent);
1289 create_resp->i_mode = cpu_to_le16(child_inode->i_mode);
1290 create_resp->i_size = cpu_to_le64(child_inode->i_size);
1291 create_resp->i_mtime = cpu_to_le64(child_inode->i_mtime.tv_sec);
1292 create_resp->i_mtime_nsec = cpu_to_le32(child_inode->i_mtime.tv_nsec);
1293 /*
1294 * keep same as hmdfs_server_open,
1295 * to prevent hmdfs_open_final_remote from judging ino errors.
1296 */
1297 create_resp->i_ino = cpu_to_le64(
1298 generate_u64_ino(hmdfs_i(child_inode)->lower_inode->i_ino,
1299 child_inode->i_generation));
1300 dput(dent);
1301 create_out:
1302 hmdfs_sendmessage_response(con, cmd, respsize, create_resp, err);
1303 kfree(create_resp);
1304 }
1305
1306 void hmdfs_server_rmdir(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1307 void *data)
1308 {
1309 int err = 0;
1310 struct path root_path;
1311 char *path = NULL;
1312 char *name = NULL;
1313 struct rmdir_request *rmdir_recv = data;
1314
1315 path = rmdir_recv->path;
1316 name = rmdir_recv->path + le32_to_cpu(rmdir_recv->path_len) + 1;
1317 err = kern_path(con->sbi->local_dst, 0, &root_path);
1318 if (!err) {
1319 err = hmdfs_root_rmdir(con->device_id, &root_path, path, name);
1320 path_put(&root_path);
1321 }
1322
1323 hmdfs_send_err_response(con, cmd, err);
1324 }
1325
1326 void hmdfs_server_unlink(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1327 void *data)
1328 {
1329 int err = 0;
1330 struct path root_path;
1331 char *path = NULL;
1332 char *name = NULL;
1333 struct unlink_request *unlink_recv = data;
1334
1335 path = unlink_recv->path;
1336 name = unlink_recv->path + le32_to_cpu(unlink_recv->path_len) + 1;
1337 err = kern_path(con->sbi->local_dst, 0, &root_path);
1338 if (!err) {
1339 err = hmdfs_root_unlink(con->device_id, &root_path, path, name);
1340 path_put(&root_path);
1341 }
1342
1343 hmdfs_send_err_response(con, cmd, err);
1344 }
1345
1346 void hmdfs_server_rename(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1347 void *data)
1348 {
1349 int err = 0;
1350 int old_path_len;
1351 int new_path_len;
1352 int old_name_len;
1353 int new_name_len;
1354 unsigned int flags;
1355 char *path_old = NULL;
1356 char *name_old = NULL;
1357 char *path_new = NULL;
1358 char *name_new = NULL;
1359 struct rename_request *recv = data;
1360
1361 old_path_len = le32_to_cpu(recv->old_path_len);
1362 new_path_len = le32_to_cpu(recv->new_path_len);
1363 old_name_len = le32_to_cpu(recv->old_name_len);
1364 new_name_len = le32_to_cpu(recv->new_name_len);
1365 flags = le32_to_cpu(recv->flags);
1366
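/*
 * recv->path packs four consecutive NUL-terminated strings:
 * old path, new path, old name, new name.
 */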
1367 path_old = recv->path;
1368 path_new = recv->path + old_path_len + 1;
1369 name_old = recv->path + old_path_len + 1 + new_path_len + 1;
1370 name_new = recv->path + old_path_len + 1 + new_path_len + 1 +
1371 old_name_len + 1;
1372
1373 err = hmdfs_root_rename(con->sbi, con->device_id, path_old, name_old,
1374 path_new, name_new, flags);
1375
1376 hmdfs_send_err_response(con, cmd, err);
1377 }
1378
1379 static int hmdfs_filldir_real(struct dir_context *ctx, const char *name,
1380 int name_len, loff_t offset, u64 ino,
1381 unsigned int d_type)
1382 {
1383 int res = 0;
1384 char namestr[NAME_MAX + 1];
1385 struct getdents_callback_real *gc = NULL;
1386 struct dentry *child = NULL;
1387
1388 if (name_len > NAME_MAX) {
1389 hmdfs_err("name_len:%d NAME_MAX:%u", name_len, NAME_MAX);
1390 goto out;
1391 }
1392
1393 gc = container_of(ctx, struct getdents_callback_real, ctx);
1394
1395 memcpy(namestr, name, name_len);
1396 namestr[name_len] = '\0';
1397
1398 if (hmdfs_file_type(namestr) != HMDFS_TYPE_COMMON)
1399 goto out;
1400
1401 /* the parent lock is already held by iterate_dir */
1402 child = lookup_one_len(name, gc->parent_path->dentry, name_len);
1403 if (IS_ERR(child)) {
1404 res = PTR_ERR(child);
1405 hmdfs_err("lookup failed because %d", res);
1406 goto out;
1407 }
1408
1409 if (d_really_is_negative(child)) {
1410 dput(child);
1411 hmdfs_err("lookup failed because negative dentry");
1412 /* skip this entry and continue with the next one */
1413 goto out;
1414 }
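/* only regular files and directories are recorded in the dentry cache */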
1415
1416 if (d_type == DT_REG || d_type == DT_DIR) {
1417 create_dentry(child, d_inode(child), gc->file, gc->sbi);
1418 gc->num++;
1419 }
1420
1421 dput(child);
1422
1423 out:
1424 /*
1425 * Always return 0 here so that the caller can continue with the next
1426 * dentry even if this one failed somehow.
1427 */
1428 return 0;
1429 }
1430
1431 static void hmdfs_server_set_header(struct hmdfs_dcache_header *header,
1432 struct file *file, struct file *dentry_file)
1433 {
1434 struct inode *inode = NULL;
1435 struct hmdfs_time_t cur_time;
1436
1437 inode = file_inode(file);
1438 cur_time = current_time(file_inode(dentry_file));
1439 header->dcache_crtime = cpu_to_le64(cur_time.tv_sec);
1440 header->dcache_crtime_nsec = cpu_to_le64(cur_time.tv_nsec);
1441 header->dentry_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
1442 header->dentry_ctime_nsec = cpu_to_le64(inode->i_ctime.tv_nsec);
1443 }
1444
1445 /* Get the dentries of the target directory */
1446 struct file *hmdfs_server_rebuild_dents(struct hmdfs_sb_info *sbi,
1447 struct path *path, loff_t *num,
1448 const char *dir)
1449 {
1450 int err = 0;
1451 struct getdents_callback_real gc = {
1452 .ctx.actor = hmdfs_filldir_real,
1453 .ctx.pos = 0,
1454 .num = 0,
1455 .sbi = sbi,
1456 .dir = dir,
1457 };
1458 struct file *file = NULL;
1459 struct file *dentry_file = NULL;
1460 struct hmdfs_dcache_header header;
1461
1462 dentry_file = create_local_dentry_file_cache(sbi);
1463 if (IS_ERR(dentry_file)) {
1464 hmdfs_err("file create failed err=%ld", PTR_ERR(dentry_file));
1465 return dentry_file;
1466 }
1467
1468 file = dentry_open(path, O_RDONLY | O_DIRECTORY, current_cred());
1469 if (IS_ERR(file)) {
1470 err = PTR_ERR(file);
1471 hmdfs_err("dentry_open failed");
1472 goto out;
1473 }
1474
1475 hmdfs_server_set_header(&header, file, dentry_file);
1476
1477 gc.parent_path = path;
1478 gc.file = dentry_file;
1479
1480 err = iterate_dir(file, &(gc.ctx));
1481 if (err) {
1482 hmdfs_err("iterate_dir failed");
1483 goto out;
1484 }
1485
1486 header.case_sensitive = sbi->s_case_sensitive;
1487 header.num = cpu_to_le64(gc.num);
1488 if (num)
1489 *num = gc.num;
1490
1491 err = write_header(dentry_file, &header);
1492 out:
1493 if (!IS_ERR_OR_NULL(file))
1494 fput(file);
1495
1496 if (err) {
1497 fput(dentry_file);
1498 dentry_file = ERR_PTR(err);
1499 }
1500
1501 trace_hmdfs_server_rebuild_dents(&header, err);
1502 return dentry_file;
1503 }
1504
1505 void hmdfs_server_writepage(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1506 void *data)
1507 {
1508 struct writepage_request *writepage_recv = data;
1509 struct hmdfs_server_writeback *hswb = NULL;
1510 __u64 file_ver;
1511 __u32 file_id;
1512 struct file *file = NULL;
1513 loff_t pos;
1514 __u32 count;
1515 ssize_t ret;
1516 int err = 0;
1517
1518 file_id = le32_to_cpu(writepage_recv->file_id);
1519 file_ver = le64_to_cpu(writepage_recv->file_ver);
1520 file = get_file_by_fid_and_ver(con, cmd, file_id, file_ver);
1521 if (IS_ERR(file)) {
1522 hmdfs_info(
1523 "file with id %u does not exist, pgindex %llu, devid %llu",
1524 file_id, le64_to_cpu(writepage_recv->index),
1525 con->device_id);
1526 err = PTR_ERR(file);
1527 goto out;
1528 }
1529
1530 pos = (loff_t)le64_to_cpu(writepage_recv->index) << HMDFS_PAGE_OFFSET;
1531 count = le32_to_cpu(writepage_recv->count);
1532 ret = kernel_write(file, writepage_recv->buf, count, &pos);
1533 if (ret != count)
1534 err = -EIO;
1535
1536 hmdfs_close_path(file);
1537 out:
1538 hmdfs_send_err_response(con, cmd, err);
1539
1540 hswb = con->sbi->h_swb;
1541 if (!err && hswb->dirty_writeback_control)
1542 hmdfs_server_check_writeback(hswb);
1543 }
1544
1545 static struct inode *hmdfs_verify_path(struct dentry *dentry, char *recv_buf,
1546 struct super_block *sb)
1547 {
1548 struct inode *inode = d_inode(dentry);
1549 struct hmdfs_inode_info *info = NULL;
1550
1551 /* the path was found in the wrong fs */
1552 if (inode->i_sb != sb) {
1553 hmdfs_err("super block do not match");
1554 return NULL;
1555 }
1556
1557 info = hmdfs_i(inode);
1558 /* make sure lower inode is not NULL */
1559 if (info->lower_inode)
1560 return info->lower_inode;
1561
1562 /*
1563 * We don't expect the lower inode to be NULL on the server. However,
1564 * it is possible because the dentry cache can contain stale data.
1565 */
1566 hmdfs_info("lower inode is NULL, is remote file: %d",
1567 info->conn != NULL);
1568 return NULL;
1569 }
1570
1571 static int hmdfs_notify_change(struct vfsmount *mnt, struct dentry *dentry,
1572 struct iattr *attr,
1573 struct inode **delegated_inode)
1574 {
1575 #ifdef CONFIG_SDCARD_FS
1576 /* sdcard_fs needs setattr2 to be invoked; plain notify_change() would only call setattr */
1577 return notify_change2(mnt, dentry, attr, delegated_inode);
1578 #else
1579 return notify_change(dentry, attr, delegated_inode);
1580 #endif
1581 }
1582
1583 void hmdfs_server_setattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1584 void *data)
1585 {
1586 int err = 0;
1587 struct dentry *dentry = NULL;
1588 struct inode *inode = NULL;
1589 struct setattr_request *recv = data;
1590 struct path root_path, dst_path;
1591 struct iattr attr;
1592 __u32 valid = le32_to_cpu(recv->valid);
1593
1594 err = kern_path(con->sbi->local_dst, 0, &root_path);
1595 if (err) {
1596 hmdfs_err("kern_path failed err = %d", err);
1597 goto out;
1598 }
1599
1600 err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->buf, 0,
1601 &dst_path);
1602 if (err)
1603 goto out_put_root;
1604
1605 inode = hmdfs_verify_path(dst_path.dentry, recv->buf, con->sbi->sb);
1606 if (!inode) {
1607 err = -ENOENT;
1608 goto out_put_dst;
1609 }
1610
1611 if (S_ISLNK(inode->i_mode)) {
1612 err = -EPERM;
1613 goto out_put_dst;
1614 }
1615
1616 dentry = dst_path.dentry;
1617 memset(&attr, 0, sizeof(attr));
1618 /* only support size and mtime */
1619 if (valid & (ATTR_SIZE | ATTR_MTIME))
1620 attr.ia_valid =
1621 (valid & (ATTR_MTIME | ATTR_MTIME_SET | ATTR_SIZE));
1622 attr.ia_size = le64_to_cpu(recv->size);
1623 attr.ia_mtime.tv_sec = le64_to_cpu(recv->mtime);
1624 attr.ia_mtime.tv_nsec = le32_to_cpu(recv->mtime_nsec);
1625
1626 inode_lock(dentry->d_inode);
1627 err = hmdfs_notify_change(dst_path.mnt, dentry, &attr, NULL);
1628 inode_unlock(dentry->d_inode);
1629
1630 out_put_dst:
1631 path_put(&dst_path);
1632 out_put_root:
1633 path_put(&root_path);
1634 out:
1635 hmdfs_send_err_response(con, cmd, err);
1636 }
1637
1638 static void update_getattr_response(struct hmdfs_peer *con, struct inode *inode,
1639 struct kstat *ks,
1640 struct getattr_response *resp)
1641 {
1642 /* if getattr for link, get ino and mode from actual lower inode */
1643 resp->ino = cpu_to_le64(
1644 generate_u64_ino(inode->i_ino, inode->i_generation));
1645 resp->mode = cpu_to_le16(inode->i_mode);
1646
1647 /* get other information from vfs_getattr() */
1648 resp->result_mask = cpu_to_le32(STATX_BASIC_STATS | STATX_BTIME);
1649 resp->fsid = cpu_to_le64(ks->dev);
1650 resp->nlink = cpu_to_le32(ks->nlink);
1651 resp->uid = cpu_to_le32(ks->uid.val);
1652 resp->gid = cpu_to_le32(ks->gid.val);
1653 resp->size = cpu_to_le64(ks->size);
1654 resp->blocks = cpu_to_le64(ks->blocks);
1655 resp->blksize = cpu_to_le32(ks->blksize);
1656 resp->atime = cpu_to_le64(ks->atime.tv_sec);
1657 resp->atime_nsec = cpu_to_le32(ks->atime.tv_nsec);
1658 resp->mtime = cpu_to_le64(ks->mtime.tv_sec);
1659 resp->mtime_nsec = cpu_to_le32(ks->mtime.tv_nsec);
1660 resp->ctime = cpu_to_le64(ks->ctime.tv_sec);
1661 resp->ctime_nsec = cpu_to_le32(ks->ctime.tv_nsec);
1662 resp->crtime = cpu_to_le64(ks->btime.tv_sec);
1663 resp->crtime_nsec = cpu_to_le32(ks->btime.tv_nsec);
1664 }
1665
1666 void hmdfs_server_getattr(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1667 void *data)
1668 {
1669 int err = 0;
1670 struct getattr_request *recv = data;
1671 int size_read = sizeof(struct getattr_response);
1672 struct getattr_response *resp = NULL;
1673 struct kstat ks;
1674 struct path root_path, dst_path;
1675 struct inode *inode = NULL;
1676 unsigned int recv_flags = le32_to_cpu(recv->lookup_flags);
1677 unsigned int lookup_flags = 0;
1678
1679 err = hmdfs_convert_lookup_flags(recv_flags, &lookup_flags);
1680 if (err)
1681 goto err;
1682
1683 resp = kzalloc(size_read, GFP_KERNEL);
1684 if (!resp) {
1685 err = -ENOMEM;
1686 goto err;
1687 }
1688 err = kern_path(con->sbi->local_dst, 0, &root_path);
1689 if (err) {
1690 hmdfs_err("kern_path failed err = %d", err);
1691 goto err_free_resp;
1692 }
1693
1694 err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->buf,
1695 lookup_flags, &dst_path);
1696 if (err)
1697 goto out_put_root;
1698
1699 inode = hmdfs_verify_path(dst_path.dentry, recv->buf, con->sbi->sb);
1700 if (!inode) {
1701 err = -ENOENT;
1702 goto out_put_dst;
1703 }
1704
1705 if (S_ISLNK(inode->i_mode)) {
1706 err = -EPERM;
1707 goto out_put_dst;
1708 }
1709
1710 err = vfs_getattr(&dst_path, &ks, STATX_BASIC_STATS | STATX_BTIME, 0);
1711 if (err)
1712 goto err_put_dst;
1713 update_getattr_response(con, inode, &ks, resp);
1714
1715 out_put_dst:
1716 path_put(&dst_path);
1717 out_put_root:
1718 /*
1719 * If the path lookup failed, return with result_mask set to zero so
1720 * that the caller can detect this situation.
1721 */
1722 if (err)
1723 resp->result_mask = cpu_to_le32(0);
1724 path_put(&root_path);
1725 hmdfs_sendmessage_response(con, cmd, size_read, resp, err);
1726 kfree(resp);
1727 return;
1728
1729 err_put_dst:
1730 path_put(&dst_path);
1731 path_put(&root_path);
1732 err_free_resp:
1733 kfree(resp);
1734 err:
1735 hmdfs_send_err_response(con, cmd, err);
1736 }
1737
1738 static void init_statfs_response(struct statfs_response *resp,
1739 struct kstatfs *st)
1740 {
1741 resp->f_type = cpu_to_le64(HMDFS_SUPER_MAGIC);
1742 resp->f_bsize = cpu_to_le64(st->f_bsize);
1743 resp->f_blocks = cpu_to_le64(st->f_blocks);
1744 resp->f_bfree = cpu_to_le64(st->f_bfree);
1745 resp->f_bavail = cpu_to_le64(st->f_bavail);
1746 resp->f_files = cpu_to_le64(st->f_files);
1747 resp->f_ffree = cpu_to_le64(st->f_ffree);
1748 resp->f_fsid_0 = cpu_to_le32(st->f_fsid.val[0]);
1749 resp->f_fsid_1 = cpu_to_le32(st->f_fsid.val[1]);
1750 resp->f_namelen = cpu_to_le64(st->f_namelen);
1751 resp->f_frsize = cpu_to_le64(st->f_frsize);
1752 resp->f_flags = cpu_to_le64(st->f_flags);
1753 /* f_spare is not used in f2fs or ext4 */
1754 resp->f_spare_0 = cpu_to_le64(st->f_spare[0]);
1755 resp->f_spare_1 = cpu_to_le64(st->f_spare[1]);
1756 resp->f_spare_2 = cpu_to_le64(st->f_spare[2]);
1757 resp->f_spare_3 = cpu_to_le64(st->f_spare[3]);
1758 }
1759
1760 void hmdfs_server_statfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1761 void *data)
1762 {
1763 struct statfs_request *recv = data;
1764 struct statfs_response *resp = NULL;
1765 struct path root_path, path;
1766 struct kstatfs *st = NULL;
1767 int err = 0;
1768
1769 st = kzalloc(sizeof(*st), GFP_KERNEL);
1770 if (!st) {
1771 err = -ENOMEM;
1772 goto out;
1773 }
1774
1775 resp = kmalloc(sizeof(*resp), GFP_KERNEL);
1776 if (!resp) {
1777 err = -ENOMEM;
1778 goto free_st;
1779 }
1780
1781 err = kern_path(con->sbi->local_src, 0, &root_path);
1782 if (err) {
1783 hmdfs_info("kern_path failed err = %d", err);
1784 goto free_st;
1785 }
1786
1787 err = vfs_path_lookup(root_path.dentry, root_path.mnt, recv->path, 0,
1788 &path);
1789 if (err) {
1790 hmdfs_info("recv->path found failed err = %d", err);
1791 goto put_root;
1792 }
1793
1794 err = vfs_statfs(&path, st);
1795 if (err)
1796 hmdfs_info("statfs local dentry failed, err = %d", err);
1797 init_statfs_response(resp, st);
1798 path_put(&path);
1799
1800 put_root:
1801 path_put(&root_path);
1802 free_st:
1803 kfree(st);
1804 out:
1805 if (err)
1806 hmdfs_send_err_response(con, cmd, err);
1807 else
1808 hmdfs_sendmessage_response(con, cmd, sizeof(*resp), resp, 0);
1809
1810 kfree(resp);
1811 }
1812
1813 void hmdfs_server_syncfs(struct hmdfs_peer *con, struct hmdfs_head_cmd *cmd,
1814 void *data)
1815 {
1816 /*
1817 * Reserved interface. This differs from the traditional syncfs flow;
1818 * the remote syncfs process on the client is:
1819 * 1. Issue remote writepages via async calls
1820 * 2. Send the remote syncfs call
1821 * 3. Wait for all remote async calls (writepages) from step 1 to return
1822 */
1823 int ret = 0;
1824
1825 hmdfs_send_err_response(con, cmd, ret);
1826 }
1827
1828 void hmdfs_server_getxattr(struct hmdfs_peer *con,
1829 struct hmdfs_head_cmd *cmd, void *data)
1830 {
1831 struct getxattr_request *recv = data;
1832 size_t size = le32_to_cpu(recv->size);
1833 size_t size_read = sizeof(struct getxattr_response) + size;
1834 struct getxattr_response *resp = NULL;
1835 struct path root_path;
1836 struct path path;
1837 char *file_path = recv->buf;
1838 char *name = recv->buf + recv->path_len + 1;
1839 int err = -ENOMEM;
1840
1841 resp = kzalloc(size_read, GFP_KERNEL);
1842 if (!resp)
1843 goto err;
1844
1845 err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path);
1846 if (err) {
1847 hmdfs_info("kern_path failed err = %d", err);
1848 goto err_free_resp;
1849 }
1850
1851 err = vfs_path_lookup(root_path.dentry, root_path.mnt,
1852 file_path, 0, &path);
1853 if (err) {
1854 hmdfs_info("path found failed err = %d", err);
1855 goto err_put_root;
1856 }
1857
1858 if (!size)
1859 err = vfs_getxattr(path.dentry, name, NULL, size);
1860 else
1861 err = vfs_getxattr(path.dentry, name, resp->value, size);
1862 if (err < 0) {
1863 hmdfs_info("getxattr failed err %d", err);
1864 goto err_put_path;
1865 }
1866
1867 resp->size = cpu_to_le32(err);
1868 hmdfs_sendmessage_response(con, cmd, size_read, resp, 0);
1869 path_put(&path);
1870 path_put(&root_path);
1871 kfree(resp);
1872 return;
1873
1874 err_put_path:
1875 path_put(&path);
1876 err_put_root:
1877 path_put(&root_path);
1878 err_free_resp:
1879 kfree(resp);
1880 err:
1881 hmdfs_send_err_response(con, cmd, err);
1882 }
1883
1884 void hmdfs_server_setxattr(struct hmdfs_peer *con,
1885 struct hmdfs_head_cmd *cmd, void *data)
1886 {
1887 struct setxattr_request *recv = data;
1888 size_t size = le32_to_cpu(recv->size);
1889 int flags = le32_to_cpu(recv->flags);
1890 bool del = recv->del;
1891 struct path root_path;
1892 struct path path;
1893 const char *file_path = NULL;
1894 const char *name = NULL;
1895 const void *value = NULL;
1896 int err;
1897
1898 err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path);
1899 if (err) {
1900 hmdfs_info("kern_path failed err = %d", err);
1901 goto err;
1902 }
1903
1904 file_path = recv->buf;
1905 name = recv->buf + recv->path_len + 1;
1906 value = name + recv->name_len + 1;
1907 err = vfs_path_lookup(root_path.dentry, root_path.mnt,
1908 file_path, 0, &path);
1909 if (err) {
1910 hmdfs_info("path found failed err = %d", err);
1911 goto err_put_root;
1912 }
1913
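/*
 * A delete request is expected to arrive with XATTR_REPLACE and is mapped
 * to vfs_removexattr(); otherwise set the xattr as requested.
 */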
1914 if (del) {
1915 WARN_ON(flags != XATTR_REPLACE);
1916 err = vfs_removexattr(path.dentry, name);
1917 } else {
1918 err = vfs_setxattr(path.dentry, name, value, size, flags);
1919 }
1920
1921 path_put(&path);
1922 err_put_root:
1923 path_put(&root_path);
1924 err:
1925 hmdfs_send_err_response(con, cmd, err);
1926 }
1927
1928 void hmdfs_server_listxattr(struct hmdfs_peer *con,
1929 struct hmdfs_head_cmd *cmd, void *data)
1930 {
1931 struct listxattr_request *recv = data;
1932 size_t size = le32_to_cpu(recv->size);
1933 int size_read = sizeof(struct listxattr_response) + size;
1934 struct listxattr_response *resp = NULL;
1935 const char *file_path = NULL;
1936 struct path root_path;
1937 struct path path;
1938 int err = 0;
1939
1940 resp = kzalloc(size_read, GFP_KERNEL);
1941 if (!resp) {
1942 err = -ENOMEM;
1943 goto err;
1944 }
1945
1946 err = kern_path(con->sbi->local_dst, LOOKUP_DIRECTORY, &root_path);
1947 if (err) {
1948 hmdfs_info("kern_path failed err = %d", err);
1949 goto err_free_resp;
1950 }
1951
1952 file_path = recv->buf;
1953 err = vfs_path_lookup(root_path.dentry, root_path.mnt,
1954 file_path, 0, &path);
1955 if (err) {
1956 hmdfs_info("path found failed err = %d", err);
1957 goto err_put_root;
1958 }
1959
1960 if (!size)
1961 err = vfs_listxattr(path.dentry, NULL, size);
1962 else
1963 err = vfs_listxattr(path.dentry, resp->list, size);
1964 if (err < 0) {
1965 hmdfs_info("listxattr failed err = %d", err);
1966 goto err_put_path;
1967 }
1968
1969 resp->size = cpu_to_le32(err);
1970 hmdfs_sendmessage_response(con, cmd, size_read, resp, 0);
1971 path_put(&root_path);
1972 path_put(&path);
1973 kfree(resp);
1974 return;
1975
1976 err_put_path:
1977 path_put(&path);
1978 err_put_root:
1979 path_put(&root_path);
1980 err_free_resp:
1981 kfree(resp);
1982 err:
1983 hmdfs_send_err_response(con, cmd, err);
1984 }
1985
1986 void hmdfs_server_get_drop_push(struct hmdfs_peer *con,
1987 struct hmdfs_head_cmd *cmd, void *data)
1988 {
1989 struct drop_push_request *dp_recv = data;
1990 struct path root_path, path;
1991 int err;
1992 char *tmp_path = NULL;
1993
1994 err = kern_path(con->sbi->real_dst, 0, &root_path);
1995 if (err) {
1996 hmdfs_err("kern_path failed err = %d", err);
1997 goto quickack;
1998 }
1999 tmp_path = kzalloc(PATH_MAX, GFP_KERNEL);
2000 if (!tmp_path)
2001 goto out;
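/* rebuild the path under DEVICE_VIEW_ROOT for this peer's cid before looking it up in real_dst */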
2002 snprintf(tmp_path, PATH_MAX, "/" DEVICE_VIEW_ROOT "/%s%s",
2003 con->cid, dp_recv->path);
2004
2005 err = vfs_path_lookup(root_path.dentry, root_path.mnt, tmp_path, 0,
2006 &path);
2007 if (err) {
2008 hmdfs_info("path found failed err = %d", err);
2009 goto free;
2010 }
2011 hmdfs_remove_cache_filp(con, path.dentry);
2012
2013 path_put(&path);
2014 free:
2015 kfree(tmp_path);
2016 out:
2017 path_put(&root_path);
2018 quickack:
2019 set_conn_sock_quickack(con);
2020 }
2021