// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/hmdfs_client.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "hmdfs_client.h"
#include "hmdfs_server.h"

#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/statfs.h>

#include "comm/socket_adapter.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_trace.h"
#include "comm/node_cb.h"
#include "stash.h"
#include "authority/authentication.h"

#define HMDFS_SYNC_WPAGE_RETRY_MS 2000

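/*
 * Free the response buffer attached to a send command and reset it so
 * the struct can be reused or torn down safely.
 */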
static inline void free_sm_outbuf(struct hmdfs_send_command *sm)
{
	if (sm->out_buf && sm->out_len != 0)
		kfree(sm->out_buf);
	sm->out_len = 0;
	sm->out_buf = NULL;
}

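/*
 * Send an F_OPEN request carrying the remote path, then decode the
 * open_response (remote ino, fid, size, ctimes) into @open_ret.
 * Returns 0 on success or a negative errno.
 */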
int hmdfs_send_open(struct hmdfs_peer *con, const char *send_buf,
		    __u8 file_type, struct hmdfs_open_ret *open_ret)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = sizeof(struct open_request) + path_len + 1;
	struct open_request *open_req = kzalloc(send_len, GFP_KERNEL);
	struct open_response *resp;
	struct hmdfs_send_command sm = {
		.data = open_req,
		.len = send_len,
	};
	hmdfs_init_cmd(&sm.operations, F_OPEN);

	if (!open_req) {
		ret = -ENOMEM;
		goto out;
	}
	open_req->file_type = file_type;
	open_req->path_len = cpu_to_le32(path_len);
	strcpy(open_req->buf, send_buf);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(open_req);

	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;
	resp = sm.out_buf;

	open_ret->ino = le64_to_cpu(resp->ino);
	open_ret->fid.ver = le64_to_cpu(resp->file_ver);
	open_ret->fid.id = le32_to_cpu(resp->file_id);
	open_ret->file_size = le64_to_cpu(resp->file_size);
	open_ret->remote_ctime.tv_sec = le64_to_cpu(resp->ctime);
	open_ret->remote_ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	open_ret->stable_ctime.tv_sec = le64_to_cpu(resp->stable_ctime);
	open_ret->stable_ctime.tv_nsec = le32_to_cpu(resp->stable_ctime_nsec);

out:
	free_sm_outbuf(&sm);
	return ret;
}

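/*
 * Send a fire-and-forget F_RELEASE for @fid; the send result is
 * deliberately ignored, presumably because the local file is being
 * torn down either way.
 */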
void hmdfs_send_close(struct hmdfs_peer *con, const struct hmdfs_fid *fid)
{
	size_t send_len = sizeof(struct release_request);
	struct release_request *release_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = release_req,
		.len = send_len,
	};
	hmdfs_init_cmd(&sm.operations, F_RELEASE);

	if (!release_req)
		return;

	release_req->file_ver = cpu_to_le64(fid->ver);
	release_req->file_id = cpu_to_le32(fid->id);

	hmdfs_sendmessage_request(con, &sm);
	kfree(release_req);
}

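/*
 * Ask the peer to fsync the byte range [@start, @end] of the remote
 * file identified by @fid; @datasync selects fdatasync semantics.
 */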
int hmdfs_send_fsync(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
		     __s64 start, __s64 end, __s32 datasync)
{
	int ret;
	struct fsync_request *fsync_req =
		kzalloc(sizeof(struct fsync_request), GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = fsync_req,
		.len = sizeof(struct fsync_request),
	};

	hmdfs_init_cmd(&sm.operations, F_FSYNC);
	if (!fsync_req)
		return -ENOMEM;

	fsync_req->file_ver = cpu_to_le64(fid->ver);
	fsync_req->file_id = cpu_to_le32(fid->id);
	fsync_req->datasync = cpu_to_le32(datasync);
	fsync_req->start = cpu_to_le64(start);
	fsync_req->end = cpu_to_le64(end);

	ret = hmdfs_sendmessage_request(con, &sm);

	free_sm_outbuf(&sm);
	kfree(fsync_req);
	return ret;
}

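/*
 * Request one page of remote file data. The locked page travels in
 * sm.out_buf and is expected to be unlocked by the response path; it
 * is only unlocked here if the request cannot even be allocated.
 */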
int hmdfs_client_readpage(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
			  struct page *page)
{
	int ret;
	size_t send_len = sizeof(struct readpage_request);
	struct readpage_request *read_data = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = read_data,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_READPAGE);
	if (!read_data) {
		unlock_page(page);
		return -ENOMEM;
	}

	sm.out_buf = page;
	read_data->file_ver = cpu_to_le64(fid->ver);
	read_data->file_id = cpu_to_le32(fid->id);
	read_data->size = cpu_to_le32(HMDFS_PAGE_SIZE);
	read_data->index = cpu_to_le64(page->index);
	ret = hmdfs_sendpage_request(con, &sm);
	kfree(read_data);
	return ret;
}

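/*
 * Report whether @p has SIGINT, SIGTERM or SIGKILL pending, i.e.
 * whether the user has asked to abort the operation in flight.
 */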
bool hmdfs_usr_sig_pending(struct task_struct *p)
{
	sigset_t *sig = &p->pending.signal;

	if (likely(!signal_pending(p)))
		return false;
	return sigismember(sig, SIGINT) || sigismember(sig, SIGTERM) ||
	       sigismember(sig, SIGKILL);
}

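/*
 * Complete a successful remote writeback: mark the page up to date,
 * end writeback and drop the locks taken by the writer.
 */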
void hmdfs_client_writepage_done(struct hmdfs_inode_info *info,
				 struct hmdfs_writepage_context *ctx)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	SetPageUptodate(page);
	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

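/*
 * Handle a failed remote writeback: keep the page contents for
 * transient errors, redirty the page when a retry makes sense, and
 * otherwise report -EIO through the mapping.
 */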
static void hmdfs_client_writepage_err(struct hmdfs_peer *peer,
				       struct hmdfs_inode_info *info,
				       struct hmdfs_writepage_context *ctx,
				       int err)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	if (err == -ENOMEM || err == -EAGAIN || err == -ESHUTDOWN ||
	    err == -ETIME)
		SetPageUptodate(page);
	else
		hmdfs_info("Page %ld of file %u writeback err %d devid %llu",
			   page->index, ctx->fid.id, err, peer->device_id);

	/*
	 * The current and all subsequent writebacks have been canceled
	 * by the user, leaving these pages in an inconsistent state.
	 * Future reads will fetch fresh data to bring them up to date.
	 */
	if (ctx->sync_all && hmdfs_usr_sig_pending(ctx->caller))
		ClearPageUptodate(page);

	if (ctx->sync_all || !time_is_after_eq_jiffies(ctx->timeout) ||
	    !(err == -ETIME || hmdfs_need_redirty_page(info, err))) {
		SetPageError(page);
		mapping_set_error(page->mapping, -EIO);
	} else {
		__set_page_dirty_nobuffers(page);
		account_page_redirty(page);
	}

	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

static inline bool
hmdfs_no_timedout_sync_write(struct hmdfs_writepage_context *ctx)
{
	return ctx->sync_all && time_is_after_eq_jiffies(ctx->timeout);
}

static inline bool
hmdfs_client_rewrite_for_timeout(struct hmdfs_writepage_context *ctx, int err)
{
	return (err == -ETIME && hmdfs_no_timedout_sync_write(ctx) &&
		!hmdfs_usr_sig_pending(ctx->caller));
}

static inline bool
hmdfs_client_rewrite_for_offline(struct hmdfs_sb_info *sbi,
				 struct hmdfs_writepage_context *ctx, int err)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	unsigned int status = READ_ONCE(info->stash_status);

	/*
	 * Never retry if the peer went offline while the inode was
	 * being restored.
	 *
	 * Do retry if the local file cache is ready, even for a
	 * non-WB_SYNC_ALL write. Otherwise the non-sync_all writeback
	 * would return -EIO and call mapping_set_error(mapping, -EIO),
	 * which in turn would make a concurrent filemap_write_and_wait()
	 * in hmdfs_flush_stash_file_data() return -EIO as well.
	 */
	return (hmdfs_is_stash_enabled(sbi) &&
		status != HMDFS_REMOTE_INODE_RESTORING &&
		(hmdfs_no_timedout_sync_write(ctx) ||
		 status == HMDFS_REMOTE_INODE_STASHING) &&
		hmdfs_is_offline_or_timeout_err(err));
}

static inline bool
hmdfs_client_redo_writepage(struct hmdfs_sb_info *sbi,
			    struct hmdfs_writepage_context *ctx, int err)
{
	return hmdfs_client_rewrite_for_timeout(ctx, err) ||
	       hmdfs_client_rewrite_for_offline(sbi, ctx, err);
}

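/*
 * Decide whether a dirty page should go to the remote peer (true) or
 * to the local stash file (false). Rechecking under stash_lock makes
 * the decision stable against a concurrent stash-state transition.
 */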
static bool hmdfs_remote_write_to_remote(struct hmdfs_inode_info *info)
{
	unsigned int status = READ_ONCE(info->stash_status);
	bool stashing;

	if (status != HMDFS_REMOTE_INODE_STASHING)
		return true;

	/* Ensure it's OK to use info->cache afterwards */
	spin_lock(&info->stash_lock);
	stashing = (info->stash_status == HMDFS_REMOTE_INODE_STASHING);
	spin_unlock(&info->stash_lock);

	return !stashing;
}

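/*
 * Write one page either to the remote peer or to the local stash.
 * If a remote write fails while the peer looks offline, queue a
 * delayed retry instead of failing the writeback immediately.
 */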
int hmdfs_remote_do_writepage(struct hmdfs_peer *con,
			      struct hmdfs_writepage_context *ctx)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	bool to_remote = false;
	int err = 0;

	to_remote = hmdfs_remote_write_to_remote(info);
	if (to_remote)
		err = hmdfs_client_writepage(info->conn, ctx);
	else
		err = hmdfs_stash_writepage(info->conn, ctx);
	if (!err)
		return 0;

	if (!(to_remote &&
	      hmdfs_client_rewrite_for_offline(con->sbi, ctx, err)))
		return err;

	queue_delayed_work(con->retry_wb_wq, &ctx->retry_dwork,
			   msecs_to_jiffies(HMDFS_SYNC_WPAGE_RETRY_MS));

	return 0;
}

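/*
 * Delayed-work handler for the retry queued above; runs with the
 * superblock credentials and finalizes the page if the retry fails.
 */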
void hmdfs_remote_writepage_retry(struct work_struct *work)
{
	struct hmdfs_writepage_context *ctx =
		container_of(work, struct hmdfs_writepage_context,
			     retry_dwork.work);
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	struct hmdfs_peer *peer = info->conn;
	const struct cred *old_cred = NULL;
	int err;

	old_cred = hmdfs_override_creds(peer->sbi->cred);
	err = hmdfs_remote_do_writepage(peer, ctx);
	hmdfs_revert_creds(old_cred);
	if (err) {
		hmdfs_client_writepage_err(peer, info, ctx, err);
		put_task_struct(ctx->caller);
		kfree(ctx);
	}
}

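/*
 * Response callback for an async F_WRITEPAGE request: finish the page
 * on success, requeue the write when the error is recoverable, and
 * fall through to the error path otherwise. Note the two labels: a
 * requeued write keeps its context and frees only the request data.
 */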
void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			const struct hmdfs_resp *resp)
{
	struct hmdfs_writepage_context *ctx = req->private;
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	int ret = resp->ret_code;
	unsigned long page_index = ctx->page->index;

	trace_hmdfs_writepage_cb_enter(peer, info->remote_ino, page_index, ret);

	if (!ret) {
		hmdfs_client_writepage_done(info, ctx);
		atomic64_inc(&info->write_counter);
		goto cleanup_all;
	}

	if (hmdfs_client_redo_writepage(peer->sbi, ctx, ret)) {
		ret = hmdfs_remote_do_writepage(peer, ctx);
		if (!ret)
			goto cleanup_req;
		WARN_ON(ret == -ETIME);
	}

	hmdfs_client_writepage_err(peer, info, ctx, ret);

cleanup_all:
	put_task_struct(ctx->caller);
	kfree(ctx);
cleanup_req:
	kfree(req->data);

	trace_hmdfs_writepage_cb_exit(peer, info->remote_ino, page_index, ret);
}

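/*
 * Copy the page contents into an F_WRITEPAGE request and send it
 * asynchronously; hmdfs_writepage_cb() consumes the response.
 * GFP_NOFS is used since this runs in the writeback path.
 */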
int hmdfs_client_writepage(struct hmdfs_peer *con,
			   struct hmdfs_writepage_context *param)
{
	int ret = 0;
	size_t send_len = sizeof(struct writepage_request) + HMDFS_PAGE_SIZE;
	struct writepage_request *write_data = kzalloc(send_len, GFP_NOFS);
	struct hmdfs_req req;
	char *data = NULL;

	if (unlikely(!write_data))
		return -ENOMEM;

	WARN_ON(!PageLocked(param->page)); // VFS
	WARN_ON(PageDirty(param->page)); // VFS
	WARN_ON(!PageWriteback(param->page)); // hmdfs

	write_data->file_ver = cpu_to_le64(param->fid.ver);
	write_data->file_id = cpu_to_le32(param->fid.id);
	write_data->index = cpu_to_le64(param->page->index);
	write_data->count = cpu_to_le32(param->count);
	data = kmap(param->page);
	memcpy((char *)write_data->buf, data, HMDFS_PAGE_SIZE);
	kunmap(param->page);
	req.data = write_data;
	req.data_len = send_len;

	req.private = param;
	req.private_len = sizeof(*param);

	req.timeout = TIMEOUT_CONFIG;
	hmdfs_init_cmd(&req.operations, F_WRITEPAGE);
	ret = hmdfs_send_async_request(con, &req);
	if (unlikely(ret))
		kfree(write_data);
	return ret;
}

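/*
 * Response handler for F_READPAGE: mark the page up to date on
 * success, or flag the fid for reopen if the peer reported a stale
 * file handle (-EBADF).
 */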
void hmdfs_client_recv_readpage(struct hmdfs_head_cmd *head, int err,
				struct hmdfs_async_work *async_work)
{
	struct page *page = async_work->page;
	int ret = le32_to_cpu(head->ret_code);
	struct hmdfs_inode_info *info = hmdfs_i(page->mapping->host);
	unsigned long page_index = page->index;

	if (!err)
		SetPageUptodate(page);
	else if (err == -EBADF)
		/* There may be a stale fd caused by the fid version; reopen */
		set_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags);

	hmdfs_client_resp_statis(async_work->head.peer->sbi, F_READPAGE,
				 HMDFS_RESP_NORMAL, async_work->start, jiffies);

	trace_hmdfs_client_recv_readpage(async_work->head.peer,
					 info->remote_ino, page_index, ret);

	asw_done(async_work);
}

/* Read the cached dentry file at @path on the peer and write it into @filp */
int hmdfs_client_start_readdir(struct hmdfs_peer *con, struct file *filp,
			       const char *path, int path_len,
			       struct hmdfs_dcache_header *header)
{
	int ret;
	size_t send_len = sizeof(struct readdir_request) + path_len + 1;
	struct readdir_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.local_filp = filp,
	};

	hmdfs_init_cmd(&sm.operations, F_ITERATE);
	if (!req)
		return -ENOMEM;

	/* Take a reference, or the file would be released at msg put */
	get_file(sm.local_filp);
	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	/*
	 * If we already have a cache file, verify it. If it is
	 * up to date, we don't have to transfer a new one.
	 */
	if (header) {
		req->dcache_crtime = header->dcache_crtime;
		req->dcache_crtime_nsec = header->dcache_crtime_nsec;
		req->dentry_ctime = header->dentry_ctime;
		req->dentry_ctime_nsec = header->dentry_ctime_nsec;
		req->num = header->num;
		req->verify_cache = cpu_to_le32(1);
	}

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

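/*
 * Create a directory on the peer. @path and @name are packed back to
 * back in one buffer, each NUL-terminated (the kzalloc'ed request is
 * already zeroed); the response carries the new inode's attributes.
 */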
int hmdfs_client_start_mkdir(struct hmdfs_peer *con,
			     const char *path, const char *name,
			     umode_t mode, struct hmdfs_lookup_ret *mkdir_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct mkdir_request) + path_len + 1 +
			  name_len + 1;
	struct mkdir_request *mkdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = mkdir_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_MKDIR);
	if (!mkdir_req)
		return -ENOMEM;

	mkdir_req->path_len = cpu_to_le32(path_len);
	mkdir_req->name_len = cpu_to_le32(name_len);
	mkdir_req->mode = cpu_to_le16(mode);
	strncpy(mkdir_req->path, path, path_len);
	strncpy(mkdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	mkdir_ret->i_mode = le16_to_cpu(resp->i_mode);
	mkdir_ret->i_size = le64_to_cpu(resp->i_size);
	mkdir_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	mkdir_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	mkdir_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(mkdir_req);
	return ret;
}

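/*
 * Create a regular file on the peer; mirrors hmdfs_client_start_mkdir()
 * above, with an additional exclusive-create hint in @want_excl.
 */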
int hmdfs_client_start_create(struct hmdfs_peer *con,
			      const char *path, const char *name,
			      umode_t mode, bool want_excl,
			      struct hmdfs_lookup_ret *create_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct create_request) + path_len + 1 +
			  name_len + 1;
	struct create_request *create_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = create_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_CREATE);
	if (!create_req)
		return -ENOMEM;

	create_req->path_len = cpu_to_le32(path_len);
	create_req->name_len = cpu_to_le32(name_len);
	create_req->mode = cpu_to_le16(mode);
	create_req->want_excl = want_excl;
	strncpy(create_req->path, path, path_len);
	strncpy(create_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	create_ret->i_mode = le16_to_cpu(resp->i_mode);
	create_ret->i_size = le64_to_cpu(resp->i_size);
	create_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	create_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	create_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(create_req);
	return ret;
}

int hmdfs_client_start_rmdir(struct hmdfs_peer *con, const char *path,
			     const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct rmdir_request) + path_len + 1 +
			  name_len + 1;
	struct rmdir_request *rmdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rmdir_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_RMDIR);
	if (!rmdir_req)
		return -ENOMEM;

	rmdir_req->path_len = cpu_to_le32(path_len);
	rmdir_req->name_len = cpu_to_le32(name_len);
	strncpy(rmdir_req->path, path, path_len);
	strncpy(rmdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rmdir_req);
	return ret;
}

int hmdfs_client_start_unlink(struct hmdfs_peer *con, const char *path,
			      const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct unlink_request) + path_len + 1 +
			  name_len + 1;
	struct unlink_request *unlink_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = unlink_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_UNLINK);
	if (!unlink_req)
		return -ENOMEM;

	unlink_req->path_len = cpu_to_le32(path_len);
	unlink_req->name_len = cpu_to_le32(name_len);
	strncpy(unlink_req->path, path, path_len);
	strncpy(unlink_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(unlink_req);
	free_sm_outbuf(&sm);
	return ret;
}

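/*
 * Rename on the peer. The request packs old_path, new_path, old_name
 * and new_name back to back in that order, each NUL-terminated.
 */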
int hmdfs_client_start_rename(struct hmdfs_peer *con, const char *old_path,
			      const char *old_name, const char *new_path,
			      const char *new_name, unsigned int flags)
{
	int ret;
	int old_path_len = strlen(old_path);
	int new_path_len = strlen(new_path);
	int old_name_len = strlen(old_name);
	int new_name_len = strlen(new_name);

	size_t send_len = sizeof(struct rename_request) + old_path_len + 1 +
			  new_path_len + 1 + old_name_len + 1 + new_name_len +
			  1;
	struct rename_request *rename_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rename_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_RENAME);
	if (!rename_req)
		return -ENOMEM;

	rename_req->old_path_len = cpu_to_le32(old_path_len);
	rename_req->new_path_len = cpu_to_le32(new_path_len);
	rename_req->old_name_len = cpu_to_le32(old_name_len);
	rename_req->new_name_len = cpu_to_le32(new_name_len);
	rename_req->flags = cpu_to_le32(flags);

	strncpy(rename_req->path, old_path, old_path_len);
	strncpy(rename_req->path + old_path_len + 1, new_path, new_path_len);

	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1,
		old_name, old_name_len);
	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1 +
		old_name_len + 1,
		new_name, new_name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rename_req);
	return ret;
}

int hmdfs_send_setattr(struct hmdfs_peer *con, const char *send_buf,
		       struct setattr_info *attr_info)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct setattr_request);
	struct setattr_request *setattr_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = setattr_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_SETATTR);
	if (!setattr_req)
		return -ENOMEM;

	strcpy(setattr_req->buf, send_buf);
	setattr_req->path_len = cpu_to_le32(path_len);
	setattr_req->valid = cpu_to_le32(attr_info->valid);
	setattr_req->size = cpu_to_le64(attr_info->size);
	setattr_req->mtime = cpu_to_le64(attr_info->mtime);
	setattr_req->mtime_nsec = cpu_to_le32(attr_info->mtime_nsec);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(setattr_req);
	return ret;
}

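/* Decode a little-endian getattr_response into the local kstat. */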
static void hmdfs_update_getattr_ret(struct getattr_response *resp,
				     struct hmdfs_getattr_ret *result)
{
	struct kstat *stat = &result->stat;

	stat->result_mask = le32_to_cpu(resp->result_mask);
	if (stat->result_mask == 0)
		return;

	stat->ino = le64_to_cpu(resp->ino);
	stat->mode = le16_to_cpu(resp->mode);
	stat->nlink = le32_to_cpu(resp->nlink);
	stat->uid.val = le32_to_cpu(resp->uid);
	stat->gid.val = le32_to_cpu(resp->gid);
	stat->size = le64_to_cpu(resp->size);
	stat->blocks = le64_to_cpu(resp->blocks);
	stat->blksize = le32_to_cpu(resp->blksize);
	stat->atime.tv_sec = le64_to_cpu(resp->atime);
	stat->atime.tv_nsec = le32_to_cpu(resp->atime_nsec);
	stat->mtime.tv_sec = le64_to_cpu(resp->mtime);
	stat->mtime.tv_nsec = le32_to_cpu(resp->mtime_nsec);
	stat->ctime.tv_sec = le64_to_cpu(resp->ctime);
	stat->ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	stat->btime.tv_sec = le64_to_cpu(resp->crtime);
	stat->btime.tv_nsec = le32_to_cpu(resp->crtime_nsec);
	result->fsid = le64_to_cpu(resp->fsid);
	/* currently not used */
	result->i_flags = 0;
}

int hmdfs_send_getattr(struct hmdfs_peer *con, const char *send_buf,
		       unsigned int lookup_flags,
		       struct hmdfs_getattr_ret *result)
{
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct getattr_request);
	int ret = 0;
	struct getattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_GETATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->lookup_flags = cpu_to_le32(lookup_flags);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getattr_ret(sm.out_buf, result);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

static void hmdfs_update_statfs_ret(struct statfs_response *resp,
				    struct kstatfs *buf)
{
	buf->f_type = le64_to_cpu(resp->f_type);
	buf->f_bsize = le64_to_cpu(resp->f_bsize);
	buf->f_blocks = le64_to_cpu(resp->f_blocks);
	buf->f_bfree = le64_to_cpu(resp->f_bfree);
	buf->f_bavail = le64_to_cpu(resp->f_bavail);
	buf->f_files = le64_to_cpu(resp->f_files);
	buf->f_ffree = le64_to_cpu(resp->f_ffree);
	buf->f_fsid.val[0] = le32_to_cpu(resp->f_fsid_0);
	buf->f_fsid.val[1] = le32_to_cpu(resp->f_fsid_1);
	buf->f_namelen = le64_to_cpu(resp->f_namelen);
	buf->f_frsize = le64_to_cpu(resp->f_frsize);
	buf->f_flags = le64_to_cpu(resp->f_flags);
	buf->f_spare[0] = le64_to_cpu(resp->f_spare_0);
	buf->f_spare[1] = le64_to_cpu(resp->f_spare_1);
	buf->f_spare[2] = le64_to_cpu(resp->f_spare_2);
	buf->f_spare[3] = le64_to_cpu(resp->f_spare_3);
}

int hmdfs_send_statfs(struct hmdfs_peer *con, const char *path,
		      struct kstatfs *buf)
{
	int ret;
	int path_len = strlen(path);
	size_t send_len = sizeof(struct statfs_request) + path_len + 1;
	struct statfs_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_STATFS);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	ret = hmdfs_sendmessage_request(con, &sm);

	if (ret == -ETIME)
		ret = -EIO;
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_statfs_ret(sm.out_buf, buf);
out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

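/*
 * Kick off an asynchronous F_SYNCFS request tagged with the current
 * syncfs version; the result arrives in hmdfs_recv_syncfs_cb().
 */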
int hmdfs_send_syncfs(struct hmdfs_peer *con, int syncfs_timeout)
{
	int ret;
	struct hmdfs_req req;
	struct hmdfs_sb_info *sbi = con->sbi;
	struct syncfs_request *syncfs_req =
		kzalloc(sizeof(struct syncfs_request), GFP_KERNEL);

	if (!syncfs_req) {
		hmdfs_err("cannot allocate syncfs_request");
		return -ENOMEM;
	}

	hmdfs_init_cmd(&req.operations, F_SYNCFS);
	req.timeout = syncfs_timeout;

	syncfs_req->version = cpu_to_le64(sbi->hsi.version);
	req.data = syncfs_req;
	req.data_len = sizeof(*syncfs_req);

	ret = hmdfs_send_async_request(con, &req);
	if (ret) {
		kfree(syncfs_req);
		hmdfs_err("sending syncfs request failed with %d", ret);
	}

	return ret;
}

static void hmdfs_update_getxattr_ret(struct getxattr_response *resp,
				      void *value, size_t o_size, int *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	if (o_size)
		memcpy(value, resp->value, size);

	*ret = size;
}

int hmdfs_send_getxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, void *value, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len +
			  sizeof(struct getxattr_request) + 2;
	int ret = 0;
	struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_GETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getxattr_ret(sm.out_buf, value, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

int hmdfs_send_setxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, const void *value,
			size_t size, int flags)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len + size + 2 +
			  sizeof(struct setxattr_request);
	int ret = 0;
	struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_SETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	req->flags = cpu_to_le32(flags);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	if (!value)
		req->del = true;
	else
		memcpy(req->buf + path_len + name_len + 2, value, size);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

static void hmdfs_update_listxattr_ret(struct listxattr_response *resp,
				       char *list, size_t o_size, ssize_t *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	/* Names are separated by '\0', so copy with memcpy */
	if (o_size)
		memcpy(list, resp->list, size);

	*ret = size;
}

ssize_t hmdfs_send_listxattr(struct hmdfs_peer *con, const char *send_buf,
			     char *list, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct listxattr_request);
	ssize_t ret = 0;
	struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_LISTXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_listxattr_ret(sm.out_buf, list, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

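/*
 * Completion callback for F_SYNCFS. Stale responses (version
 * mismatch) are dropped; otherwise record the first remote error,
 * clear the dirty counter if nothing was dirtied meanwhile, and wake
 * the waiter in the syncfs path.
 */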
void hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			  const struct hmdfs_resp *resp)
{
	struct hmdfs_sb_info *sbi = peer->sbi;
	struct syncfs_request *syncfs_req = (struct syncfs_request *)req->data;

	WARN_ON(!syncfs_req);
	spin_lock(&sbi->hsi.v_lock);
	if (le64_to_cpu(syncfs_req->version) != sbi->hsi.version) {
		hmdfs_info(
			"Recv stale syncfs resp[ver: %llu] from device %llu, current ver %llu",
			le64_to_cpu(syncfs_req->version), peer->device_id,
			sbi->hsi.version);
		spin_unlock(&sbi->hsi.v_lock);
		goto out;
	}

	if (!sbi->hsi.remote_ret)
		sbi->hsi.remote_ret = resp->ret_code;

	if (resp->ret_code) {
		hmdfs_err("Recv syncfs error code %d from device %llu",
			  resp->ret_code, peer->device_id);
	} else {
		/*
		 * Set @sb_dirty_count to zero if no one else produced
		 * dirty data on the remote server during the remote sync.
		 */
		atomic64_cmpxchg(&peer->sb_dirty_count,
				 peer->old_sb_dirty_count, 0);
	}

	atomic_dec(&sbi->hsi.wait_count);
	spin_unlock(&sbi->hsi.v_lock);
	wake_up_interruptible(&sbi->hsi.wq);

out:
	kfree(syncfs_req);
}

void hmdfs_send_drop_push(struct hmdfs_peer *con, const char *path)
{
	int path_len = strlen(path);
	size_t send_len = sizeof(struct drop_push_request) + path_len + 1;
	struct drop_push_request *dp_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = dp_req,
		.len = send_len,
	};

	hmdfs_init_cmd(&sm.operations, F_DROP_PUSH);
	if (!dp_req)
		return;

	dp_req->path_len = cpu_to_le32(path_len);
	strncpy(dp_req->path, path, path_len);

	hmdfs_sendmessage_request(con, &sm);
	kfree(dp_req);
}

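/*
 * Walk the peer's message idr: return the next in-flight message at
 * or after *@id with an extra reference held, or NULL when done.
 */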
static void *hmdfs_get_msg_next(struct hmdfs_peer *peer, int *id)
{
	struct hmdfs_msg_idr_head *head = NULL;

	spin_lock(&peer->idr_lock);
	head = idr_get_next(&peer->msg_idr, id);
	if (head && head->type < MSG_IDR_MAX && head->type >= 0)
		kref_get(&head->ref);

	spin_unlock(&peer->idr_lock);

	return head;
}

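/*
 * Offline event handler: wake every outstanding message on this peer
 * with -ETIME (or its type-specific wakeup) so that no caller keeps
 * blocking on a connection that is gone.
 */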
void hmdfs_client_offline_notify(struct hmdfs_peer *conn, int evt,
				 unsigned int seq)
{
	int id;
	int count = 0;
	struct hmdfs_msg_idr_head *head = NULL;

	for (id = 0; (head = hmdfs_get_msg_next(conn, &id)) != NULL; ++id) {
		switch (head->type) {
		case MSG_IDR_1_0_NONE:
			head_put(head);
			head_put(head);
			break;
		case MSG_IDR_MESSAGE_SYNC:
		case MSG_IDR_1_0_MESSAGE_SYNC:
			hmdfs_response_wakeup((struct sendmsg_wait_queue *)head,
					      -ETIME, 0, NULL);
			hmdfs_debug("wakeup id=%d", head->msg_id);
			msg_put((struct sendmsg_wait_queue *)head);
			break;
		case MSG_IDR_MESSAGE_ASYNC:
			hmdfs_wakeup_parasite(
				(struct hmdfs_msg_parasite *)head);
			hmdfs_debug("wakeup parasite id=%d", head->msg_id);
			mp_put((struct hmdfs_msg_parasite *)head);
			break;
		case MSG_IDR_PAGE:
		case MSG_IDR_1_0_PAGE:
			hmdfs_wakeup_async_work(
				(struct hmdfs_async_work *)head);
			hmdfs_debug("wakeup async work id=%d", head->msg_id);
			asw_put((struct hmdfs_async_work *)head);
			break;
		default:
			hmdfs_err("Bad type=%d id=%d", head->type,
				  head->msg_id);
			break;
		}

		count++;
		/*
		 * If there are too many idr entries to process, reschedule
		 * every 512 messages to avoid a soft lockup.
		 */
		if (count % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}
}

static struct hmdfs_node_cb_desc client_cb[] = {
	{
		.evt = NODE_EVT_OFFLINE,
		.sync = true,
		.fn = hmdfs_client_offline_notify,
	},
};

void __init hmdfs_client_add_node_evt_cb(void)
{
	hmdfs_node_add_evt_cb(client_cb, ARRAY_SIZE(client_cb));
}