// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/hmdfs_client.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "hmdfs_client.h"
#include "hmdfs_server.h"

#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/statfs.h>

#include "comm/socket_adapter.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_trace.h"
#include "comm/node_cb.h"
#include "stash.h"
#include "authority/authentication.h"

#define HMDFS_SYNC_WPAGE_RETRY_MS 2000

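/* Free the response buffer of a send command and reset it for reuse. */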
static inline void free_sm_outbuf(struct hmdfs_send_command *sm)
{
	if (sm->out_buf && sm->out_len != 0)
		kfree(sm->out_buf);
	sm->out_len = 0;
	sm->out_buf = NULL;
}

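/*
 * Send an F_OPEN request carrying the path in @send_buf to the remote
 * peer, then decode the little-endian open_response into @open_ret
 * (remote inode number, fid, file size and ctimes).
 */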
int hmdfs_send_open(struct hmdfs_peer *con, const char *send_buf,
		    __u8 file_type, struct hmdfs_open_ret *open_ret)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = sizeof(struct open_request) + path_len + 1;
	struct open_request *open_req = kzalloc(send_len, GFP_KERNEL);
	struct open_response *resp;
	struct hmdfs_send_command sm = {
		.data = open_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};
	hmdfs_init_cmd(&sm.operations, F_OPEN);

	if (!open_req) {
		ret = -ENOMEM;
		goto out;
	}
	open_req->file_type = file_type;
	open_req->path_len = cpu_to_le32(path_len);
	strcpy(open_req->buf, send_buf);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(open_req);

	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;
	resp = sm.out_buf;

	open_ret->ino = le64_to_cpu(resp->ino);
	open_ret->fid.ver = le64_to_cpu(resp->file_ver);
	open_ret->fid.id = le32_to_cpu(resp->file_id);
	open_ret->file_size = le64_to_cpu(resp->file_size);
	open_ret->remote_ctime.tv_sec = le64_to_cpu(resp->ctime);
	open_ret->remote_ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	open_ret->stable_ctime.tv_sec = le64_to_cpu(resp->stable_ctime);
	open_ret->stable_ctime.tv_nsec = le32_to_cpu(resp->stable_ctime_nsec);

out:
	free_sm_outbuf(&sm);
	return ret;
}

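/*
 * Send a best-effort F_RELEASE request for @fid; errors are ignored
 * since the close path has no way to act on them.
 */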
void hmdfs_send_close(struct hmdfs_peer *con, const struct hmdfs_fid *fid)
{
	size_t send_len = sizeof(struct release_request);
	struct release_request *release_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = release_req,
		.len = send_len,
		.local_filp = NULL,
	};
	hmdfs_init_cmd(&sm.operations, F_RELEASE);

	if (!release_req)
		return;

	release_req->file_ver = cpu_to_le64(fid->ver);
	release_req->file_id = cpu_to_le32(fid->id);

	hmdfs_sendmessage_request(con, &sm);
	kfree(release_req);
}

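/*
 * Ask the remote peer to fsync the range [@start, @end] of the file
 * referenced by @fid; @datasync distinguishes fdatasync from fsync.
 */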
int hmdfs_send_fsync(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
		     __s64 start, __s64 end, __s32 datasync)
{
	int ret;
	struct fsync_request *fsync_req =
		kzalloc(sizeof(struct fsync_request), GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = fsync_req,
		.len = sizeof(struct fsync_request),
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_FSYNC);
	if (!fsync_req)
		return -ENOMEM;

	fsync_req->file_ver = cpu_to_le64(fid->ver);
	fsync_req->file_id = cpu_to_le32(fid->id);
	fsync_req->datasync = cpu_to_le32(datasync);
	fsync_req->start = cpu_to_le64(start);
	fsync_req->end = cpu_to_le64(end);

	ret = hmdfs_sendmessage_request(con, &sm);

	free_sm_outbuf(&sm);
	kfree(fsync_req);
	return ret;
}

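/*
 * Request one page of remote file data. The page is handed to the
 * transport via sm.out_buf; it is unlocked here only on the allocation
 * failure path, otherwise by the readpage response path.
 */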
int hmdfs_client_readpage(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
			  struct page *page)
{
	int ret;
	size_t send_len = sizeof(struct readpage_request);
	struct readpage_request *read_data = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = read_data,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_READPAGE);
	if (!read_data) {
		unlock_page(page);
		return -ENOMEM;
	}

	sm.out_buf = page;
	read_data->file_ver = cpu_to_le64(fid->ver);
	read_data->file_id = cpu_to_le32(fid->id);
	read_data->size = cpu_to_le32(HMDFS_PAGE_SIZE);
	read_data->index = cpu_to_le64(page->index);
	ret = hmdfs_sendpage_request(con, &sm);
	kfree(read_data);
	return ret;
}

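/*
 * Tell whether @p has SIGINT, SIGTERM or SIGKILL pending; used to
 * abort writeback retries once the user has canceled the operation.
 */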
bool hmdfs_usr_sig_pending(struct task_struct *p)
{
	sigset_t *sig = &p->pending.signal;

	if (likely(!signal_pending(p)))
		return false;
	return sigismember(sig, SIGINT) || sigismember(sig, SIGTERM) ||
	       sigismember(sig, SIGKILL);
}

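/*
 * Finish a successful remote writepage: mark the page up to date, end
 * writeback, and drop the wpage_sem read lock if it was taken.
 */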
void hmdfs_client_writepage_done(struct hmdfs_inode_info *info,
				 struct hmdfs_writepage_context *ctx)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	SetPageUptodate(page);
	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

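/*
 * Handle a failed remote writepage: depending on the error and the
 * writeback mode, either redirty the page so a later writeback can
 * retry it, or record an -EIO error on the mapping.
 */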
static void hmdfs_client_writepage_err(struct hmdfs_peer *peer,
				       struct hmdfs_inode_info *info,
				       struct hmdfs_writepage_context *ctx,
				       int err)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	if (err == -ENOMEM || err == -EAGAIN || err == -ESHUTDOWN ||
	    err == -ETIME)
		SetPageUptodate(page);
	else
		hmdfs_info("Page %ld of file %u writeback err %d devid %llu",
			   page->index, ctx->fid.id, err, peer->device_id);

	/*
	 * The current and subsequent writebacks have been canceled by
	 * the user, leaving these pages in an inconsistent state; they
	 * will be re-read later to bring them up to date.
	 */
	if (ctx->sync_all && hmdfs_usr_sig_pending(ctx->caller))
		ClearPageUptodate(page);

	if (ctx->sync_all || !time_is_after_eq_jiffies(ctx->timeout) ||
	    !(err == -ETIME || hmdfs_need_redirty_page(info, err))) {
		SetPageError(page);
		mapping_set_error(page->mapping, -EIO);
	} else {
		__set_page_dirty_nobuffers(page);
		account_page_redirty(page);
	}

	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

static inline bool
hmdfs_no_timedout_sync_write(struct hmdfs_writepage_context *ctx)
{
	return ctx->sync_all && time_is_after_eq_jiffies(ctx->timeout);
}

static inline bool
hmdfs_client_rewrite_for_timeout(struct hmdfs_writepage_context *ctx, int err)
{
	return (err == -ETIME && hmdfs_no_timedout_sync_write(ctx) &&
		!hmdfs_usr_sig_pending(ctx->caller));
}

static inline bool
hmdfs_client_rewrite_for_offline(struct hmdfs_sb_info *sbi,
				 struct hmdfs_writepage_context *ctx, int err)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	unsigned int status = READ_ONCE(info->stash_status);

	/*
	 * Don't retry if offline occurs during inode restoration.
	 *
	 * Do retry if the local file cache is ready, even if this is
	 * not a WB_SYNC_ALL write; otherwise a non-WB_SYNC_ALL
	 * writeback would return -EIO, mapping_set_error(mapping, -EIO)
	 * would be called, and that would make the concurrent call to
	 * filemap_write_and_wait() in hmdfs_flush_stash_file_data()
	 * return -EIO as well.
	 */
	return (hmdfs_is_stash_enabled(sbi) &&
		status != HMDFS_REMOTE_INODE_RESTORING &&
		(hmdfs_no_timedout_sync_write(ctx) ||
		 status == HMDFS_REMOTE_INODE_STASHING) &&
		hmdfs_is_offline_or_timeout_err(err));
}

static inline bool
hmdfs_client_redo_writepage(struct hmdfs_sb_info *sbi,
			    struct hmdfs_writepage_context *ctx, int err)
{
	return hmdfs_client_rewrite_for_timeout(ctx, err) ||
	       hmdfs_client_rewrite_for_offline(sbi, ctx, err);
}

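/*
 * Decide whether a dirty page should go to the remote peer (true) or
 * into the local stash file (false); stash_lock is taken to stabilize
 * the status before the stash path uses info->cache.
 */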
static bool hmdfs_remote_write_to_remote(struct hmdfs_inode_info *info)
{
	unsigned int status = READ_ONCE(info->stash_status);
	bool stashing;

	if (status != HMDFS_REMOTE_INODE_STASHING)
		return true;

	/* Ensure it's OK to use info->cache afterwards */
	spin_lock(&info->stash_lock);
	stashing = (info->stash_status == HMDFS_REMOTE_INODE_STASHING);
	spin_unlock(&info->stash_lock);

	return !stashing;
}

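/*
 * Write one page either to the remote peer or to the local stash. On
 * an offline-style failure, the write is requeued to retry_wb_wq after
 * HMDFS_SYNC_WPAGE_RETRY_MS instead of reporting the error.
 */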
int hmdfs_remote_do_writepage(struct hmdfs_peer *con,
			      struct hmdfs_writepage_context *ctx)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	bool to_remote = false;
	int err = 0;

	to_remote = hmdfs_remote_write_to_remote(info);
	if (to_remote)
		err = hmdfs_client_writepage(info->conn, ctx);
	else
		err = hmdfs_stash_writepage(info->conn, ctx);
	if (!err)
		return 0;

	if (!(to_remote &&
	      hmdfs_client_rewrite_for_offline(con->sbi, ctx, err)))
		return err;

	queue_delayed_work(con->retry_wb_wq, &ctx->retry_dwork,
			   msecs_to_jiffies(HMDFS_SYNC_WPAGE_RETRY_MS));

	return 0;
}

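/* Delayed-work handler that retries a previously failed writepage. */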
void hmdfs_remote_writepage_retry(struct work_struct *work)
{
	struct hmdfs_writepage_context *ctx =
		container_of(work, struct hmdfs_writepage_context,
			     retry_dwork.work);
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	struct hmdfs_peer *peer = info->conn;
	const struct cred *old_cred = NULL;
	int err;

	old_cred = hmdfs_override_creds(peer->sbi->cred);
	err = hmdfs_remote_do_writepage(peer, ctx);
	hmdfs_revert_creds(old_cred);
	if (err) {
		hmdfs_client_writepage_err(peer, info, ctx, err);
		put_task_struct(ctx->caller);
		kfree(ctx);
	}
}

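/*
 * Async response callback for F_WRITEPAGE: on success complete the
 * page; otherwise retry via hmdfs_remote_do_writepage() when the error
 * is redoable, and finally record the failure.
 */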
void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			const struct hmdfs_resp *resp)
{
	struct hmdfs_writepage_context *ctx = req->private;
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	int ret = resp->ret_code;
	unsigned long page_index = ctx->page->index;

	trace_hmdfs_writepage_cb_enter(peer, info->remote_ino, page_index, ret);

	if (!ret) {
		hmdfs_client_writepage_done(info, ctx);
		atomic64_inc(&info->write_counter);
		goto cleanup_all;
	}

	if (hmdfs_client_redo_writepage(peer->sbi, ctx, ret)) {
		ret = hmdfs_remote_do_writepage(peer, ctx);
		if (!ret)
			goto cleanup_req;
		WARN_ON(ret == -ETIME);
	}

	hmdfs_client_writepage_err(peer, info, ctx, ret);

cleanup_all:
	put_task_struct(ctx->caller);
	kfree(ctx);
cleanup_req:
	kfree(req->data);

	trace_hmdfs_writepage_cb_exit(peer, info->remote_ino, page_index, ret);
}

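/*
 * Copy the locked page into a writepage_request and send it as an
 * asynchronous F_WRITEPAGE request; completion is handled by
 * hmdfs_writepage_cb().
 */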
int hmdfs_client_writepage(struct hmdfs_peer *con,
			   struct hmdfs_writepage_context *param)
{
	int ret = 0;
	size_t send_len = sizeof(struct writepage_request) + HMDFS_PAGE_SIZE;
	struct writepage_request *write_data = kzalloc(send_len, GFP_NOFS);
	struct hmdfs_req req;
	char *data = NULL;

	if (unlikely(!write_data))
		return -ENOMEM;

	WARN_ON(!PageLocked(param->page)); // VFS
	WARN_ON(PageDirty(param->page)); // VFS
	WARN_ON(!PageWriteback(param->page)); // hmdfs

	write_data->file_ver = cpu_to_le64(param->fid.ver);
	write_data->file_id = cpu_to_le32(param->fid.id);
	write_data->index = cpu_to_le64(param->page->index);
	write_data->count = cpu_to_le32(param->count);
	data = kmap(param->page);
	memcpy((char *)write_data->buf, data, HMDFS_PAGE_SIZE);
	kunmap(param->page);
	req.data = write_data;
	req.data_len = send_len;

	req.private = param;
	req.private_len = sizeof(*param);

	req.timeout = TIMEOUT_CONFIG;
	hmdfs_init_cmd(&req.operations, F_WRITEPAGE);
	ret = hmdfs_send_async_request(con, &req);
	if (unlikely(ret))
		kfree(write_data);
	return ret;
}

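/*
 * Response handler for F_READPAGE: marks the page up to date on
 * success and flags the fid for reopen on -EBADF.
 */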
void hmdfs_client_recv_readpage(struct hmdfs_head_cmd *head, int err,
				struct hmdfs_async_work *async_work)
{
	struct page *page = async_work->page;
	int ret = le32_to_cpu(head->ret_code);
	struct hmdfs_inode_info *info = hmdfs_i(page->mapping->host);
	unsigned long page_index = page->index;

	if (!err)
		SetPageUptodate(page);
	else if (err == -EBADF)
		/* The fd may be stale due to a fid version change; reopen */
		set_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags);

	hmdfs_client_resp_statis(async_work->head.peer->sbi, F_READPAGE,
				 HMDFS_RESP_NORMAL, async_work->start, jiffies);

	trace_hmdfs_client_recv_readpage(async_work->head.peer,
					 info->remote_ino, page_index, ret);

	asw_done(async_work);
}

/* Read the cache dentry file at @path and write it into @filp */
int hmdfs_client_start_readdir(struct hmdfs_peer *con, struct file *filp,
			       const char *path, int path_len,
			       struct hmdfs_dcache_header *header)
{
	int ret;
	size_t send_len = sizeof(struct readdir_request) + path_len + 1;
	struct readdir_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.local_filp = filp,
	};

	hmdfs_init_cmd(&sm.operations, F_ITERATE);
	if (!req)
		return -ENOMEM;

	/* Add a reference, or the file will be released at msg put */
	get_file(sm.local_filp);
	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	/*
	 * If we already have a cache file, verify it. If it is up to
	 * date, we don't have to transfer a new one.
	 */
	if (header) {
		req->dcache_crtime = header->dcache_crtime;
		req->dcache_crtime_nsec = header->dcache_crtime_nsec;
		req->dentry_ctime = header->dentry_ctime;
		req->dentry_ctime_nsec = header->dentry_ctime_nsec;
		req->num = header->num;
		req->verify_cache = cpu_to_le32(1);
	}

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

int hmdfs_client_start_mkdir(struct hmdfs_peer *con,
			     const char *path, const char *name,
			     umode_t mode, struct hmdfs_lookup_ret *mkdir_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct mkdir_request) + path_len + 1 +
			  name_len + 1;
	struct mkdir_request *mkdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = mkdir_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_MKDIR);
	if (!mkdir_req)
		return -ENOMEM;

	mkdir_req->path_len = cpu_to_le32(path_len);
	mkdir_req->name_len = cpu_to_le32(name_len);
	mkdir_req->mode = cpu_to_le16(mode);
	strncpy(mkdir_req->path, path, path_len);
	strncpy(mkdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	mkdir_ret->i_mode = le16_to_cpu(resp->i_mode);
	mkdir_ret->i_size = le64_to_cpu(resp->i_size);
	mkdir_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	mkdir_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	mkdir_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(mkdir_req);
	return ret;
}

int hmdfs_client_start_create(struct hmdfs_peer *con,
			      const char *path, const char *name,
			      umode_t mode, bool want_excl,
			      struct hmdfs_lookup_ret *create_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct create_request) + path_len + 1 +
			  name_len + 1;
	struct create_request *create_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = create_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_CREATE);
	if (!create_req)
		return -ENOMEM;

	create_req->path_len = cpu_to_le32(path_len);
	create_req->name_len = cpu_to_le32(name_len);
	create_req->mode = cpu_to_le16(mode);
	create_req->want_excl = want_excl;
	strncpy(create_req->path, path, path_len);
	strncpy(create_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	create_ret->i_mode = le16_to_cpu(resp->i_mode);
	create_ret->i_size = le64_to_cpu(resp->i_size);
	create_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	create_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	create_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(create_req);
	return ret;
}

int hmdfs_client_start_rmdir(struct hmdfs_peer *con, const char *path,
			     const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct rmdir_request) + path_len + 1 +
			  name_len + 1;
	struct rmdir_request *rmdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rmdir_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_RMDIR);
	if (!rmdir_req)
		return -ENOMEM;

	rmdir_req->path_len = cpu_to_le32(path_len);
	rmdir_req->name_len = cpu_to_le32(name_len);
	strncpy(rmdir_req->path, path, path_len);
	strncpy(rmdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rmdir_req);
	return ret;
}

int hmdfs_client_start_unlink(struct hmdfs_peer *con, const char *path,
			      const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct unlink_request) + path_len + 1 +
			  name_len + 1;
	struct unlink_request *unlink_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = unlink_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_UNLINK);
	if (!unlink_req)
		return -ENOMEM;

	unlink_req->path_len = cpu_to_le32(path_len);
	unlink_req->name_len = cpu_to_le32(name_len);
	strncpy(unlink_req->path, path, path_len);
	strncpy(unlink_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(unlink_req);
	free_sm_outbuf(&sm);
	return ret;
}

int hmdfs_client_start_rename(struct hmdfs_peer *con, const char *old_path,
			      const char *old_name, const char *new_path,
			      const char *new_name, unsigned int flags)
{
	int ret;
	int old_path_len = strlen(old_path);
	int new_path_len = strlen(new_path);
	int old_name_len = strlen(old_name);
	int new_name_len = strlen(new_name);

	size_t send_len = sizeof(struct rename_request) + old_path_len + 1 +
			  new_path_len + 1 + old_name_len + 1 + new_name_len +
			  1;
	struct rename_request *rename_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rename_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_RENAME);
	if (!rename_req)
		return -ENOMEM;

	rename_req->old_path_len = cpu_to_le32(old_path_len);
	rename_req->new_path_len = cpu_to_le32(new_path_len);
	rename_req->old_name_len = cpu_to_le32(old_name_len);
	rename_req->new_name_len = cpu_to_le32(new_name_len);
	rename_req->flags = cpu_to_le32(flags);

	strncpy(rename_req->path, old_path, old_path_len);
	strncpy(rename_req->path + old_path_len + 1, new_path, new_path_len);

	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1,
		old_name, old_name_len);
	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1 +
			old_name_len + 1,
		new_name, new_name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rename_req);
	return ret;
}

int hmdfs_send_setattr(struct hmdfs_peer *con, const char *send_buf,
		       struct setattr_info *attr_info)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct setattr_request);
	struct setattr_request *setattr_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = setattr_req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_SETATTR);
	if (!setattr_req)
		return -ENOMEM;

	strcpy(setattr_req->buf, send_buf);
	setattr_req->path_len = cpu_to_le32(path_len);
	setattr_req->valid = cpu_to_le32(attr_info->valid);
	setattr_req->size = cpu_to_le64(attr_info->size);
	setattr_req->mtime = cpu_to_le64(attr_info->mtime);
	setattr_req->mtime_nsec = cpu_to_le32(attr_info->mtime_nsec);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(setattr_req);
	return ret;
}

static void hmdfs_update_getattr_ret(struct getattr_response *resp,
				     struct hmdfs_getattr_ret *result)
{
	struct kstat *stat = &result->stat;

	stat->result_mask = le32_to_cpu(resp->result_mask);
	if (stat->result_mask == 0)
		return;

	stat->ino = le64_to_cpu(resp->ino);
	stat->mode = le16_to_cpu(resp->mode);
	stat->nlink = le32_to_cpu(resp->nlink);
	stat->uid.val = le32_to_cpu(resp->uid);
	stat->gid.val = le32_to_cpu(resp->gid);
	stat->size = le64_to_cpu(resp->size);
	stat->blocks = le64_to_cpu(resp->blocks);
	stat->blksize = le32_to_cpu(resp->blksize);
	stat->atime.tv_sec = le64_to_cpu(resp->atime);
	stat->atime.tv_nsec = le32_to_cpu(resp->atime_nsec);
	stat->mtime.tv_sec = le64_to_cpu(resp->mtime);
	stat->mtime.tv_nsec = le32_to_cpu(resp->mtime_nsec);
	stat->ctime.tv_sec = le64_to_cpu(resp->ctime);
	stat->ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	stat->btime.tv_sec = le64_to_cpu(resp->crtime);
	stat->btime.tv_nsec = le32_to_cpu(resp->crtime_nsec);
	result->fsid = le64_to_cpu(resp->fsid);
	/* currently not used */
	result->i_flags = 0;
}

int hmdfs_send_getattr(struct hmdfs_peer *con, const char *send_buf,
		       unsigned int lookup_flags,
		       struct hmdfs_getattr_ret *result)
{
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct getattr_request);
	int ret = 0;
	struct getattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_GETATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->lookup_flags = cpu_to_le32(lookup_flags);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getattr_ret(sm.out_buf, result);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

static void hmdfs_update_statfs_ret(struct statfs_response *resp,
				    struct kstatfs *buf)
{
	buf->f_type = le64_to_cpu(resp->f_type);
	buf->f_bsize = le64_to_cpu(resp->f_bsize);
	buf->f_blocks = le64_to_cpu(resp->f_blocks);
	buf->f_bfree = le64_to_cpu(resp->f_bfree);
	buf->f_bavail = le64_to_cpu(resp->f_bavail);
	buf->f_files = le64_to_cpu(resp->f_files);
	buf->f_ffree = le64_to_cpu(resp->f_ffree);
	buf->f_fsid.val[0] = le32_to_cpu(resp->f_fsid_0);
	buf->f_fsid.val[1] = le32_to_cpu(resp->f_fsid_1);
	buf->f_namelen = le64_to_cpu(resp->f_namelen);
	buf->f_frsize = le64_to_cpu(resp->f_frsize);
	buf->f_flags = le64_to_cpu(resp->f_flags);
	buf->f_spare[0] = le64_to_cpu(resp->f_spare_0);
	buf->f_spare[1] = le64_to_cpu(resp->f_spare_1);
	buf->f_spare[2] = le64_to_cpu(resp->f_spare_2);
	buf->f_spare[3] = le64_to_cpu(resp->f_spare_3);
}

int hmdfs_send_statfs(struct hmdfs_peer *con, const char *path,
		      struct kstatfs *buf)
{
	int ret;
	int path_len = strlen(path);
	size_t send_len = sizeof(struct statfs_request) + path_len + 1;
	struct statfs_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_STATFS);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	ret = hmdfs_sendmessage_request(con, &sm);

	if (ret == -ETIME)
		ret = -EIO;
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_statfs_ret(sm.out_buf, buf);
out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

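/*
 * Send an async F_SYNCFS request tagged with the current syncfs
 * version; the reply is processed in hmdfs_recv_syncfs_cb().
 */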
int hmdfs_send_syncfs(struct hmdfs_peer *con, int syncfs_timeout)
{
	int ret;
	struct hmdfs_req req;
	struct hmdfs_sb_info *sbi = con->sbi;
	struct syncfs_request *syncfs_req =
		kzalloc(sizeof(struct syncfs_request), GFP_KERNEL);

	if (!syncfs_req) {
		hmdfs_err("cannot allocate syncfs_request");
		return -ENOMEM;
	}

	hmdfs_init_cmd(&req.operations, F_SYNCFS);
	req.timeout = syncfs_timeout;

	syncfs_req->version = cpu_to_le64(sbi->hsi.version);
	req.data = syncfs_req;
	req.data_len = sizeof(*syncfs_req);

	ret = hmdfs_send_async_request(con, &req);
	if (ret) {
		kfree(syncfs_req);
		hmdfs_err("sending syncfs request failed with %d", ret);
	}

	return ret;
}

static void hmdfs_update_getxattr_ret(struct getxattr_response *resp,
				      void *value, size_t o_size, int *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	if (o_size)
		memcpy(value, resp->value, size);

	*ret = size;
}

int hmdfs_send_getxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, void *value, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len +
			  sizeof(struct getxattr_request) + 2;
	int ret = 0;
	struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_GETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getxattr_ret(sm.out_buf, value, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

int hmdfs_send_setxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, const void *value,
			size_t size, int flags)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len + size + 2 +
			  sizeof(struct setxattr_request);
	int ret = 0;
	struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_SETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	req->flags = cpu_to_le32(flags);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	if (!value)
		req->del = true;
	else
		memcpy(req->buf + path_len + name_len + 2, value, size);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

static void hmdfs_update_listxattr_ret(struct listxattr_response *resp,
				       char *list, size_t o_size, ssize_t *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	/* Multiple names are separated by '\0', so use memcpy */
	if (o_size)
		memcpy(list, resp->list, size);

	*ret = size;
}

ssize_t hmdfs_send_listxattr(struct hmdfs_peer *con, const char *send_buf,
			     char *list, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct listxattr_request);
	ssize_t ret = 0;
	struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_LISTXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_listxattr_ret(sm.out_buf, list, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

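/*
 * F_SYNCFS response callback: drop stale replies by comparing the
 * request version against sbi->hsi.version, record the first remote
 * error, and wake up the waiter in the syncfs path.
 */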
void hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			  const struct hmdfs_resp *resp)
{
	struct hmdfs_sb_info *sbi = peer->sbi;
	struct syncfs_request *syncfs_req = (struct syncfs_request *)req->data;

	WARN_ON(!syncfs_req);
	spin_lock(&sbi->hsi.v_lock);
	if (le64_to_cpu(syncfs_req->version) != sbi->hsi.version) {
		hmdfs_info(
			"Recv stale syncfs resp[ver: %llu] from device %llu, current ver %llu",
			le64_to_cpu(syncfs_req->version), peer->device_id,
			sbi->hsi.version);
		spin_unlock(&sbi->hsi.v_lock);
		goto out;
	}

	if (!sbi->hsi.remote_ret)
		sbi->hsi.remote_ret = resp->ret_code;

	if (resp->ret_code) {
		hmdfs_err("Recv syncfs error code %d from device %llu",
			  resp->ret_code, peer->device_id);
	} else {
		/*
		 * Set @sb_dirty_count to zero if no one else produced
		 * dirty data on the remote server during the remote sync.
		 */
		atomic64_cmpxchg(&peer->sb_dirty_count,
				 peer->old_sb_dirty_count, 0);
	}

	atomic_dec(&sbi->hsi.wait_count);
	spin_unlock(&sbi->hsi.v_lock);
	wake_up_interruptible(&sbi->hsi.wq);

out:
	kfree(syncfs_req);
}

void hmdfs_send_drop_push(struct hmdfs_peer *con, const char *path)
{
	int path_len = strlen(path);
	size_t send_len = sizeof(struct drop_push_request) + path_len + 1;
	struct drop_push_request *dp_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = dp_req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_DROP_PUSH);
	if (!dp_req)
		return;

	dp_req->path_len = cpu_to_le32(path_len);
	strncpy(dp_req->path, path, path_len);

	hmdfs_sendmessage_request(con, &sm);
	kfree(dp_req);
}

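/*
 * Iterate the peer's msg_idr, returning the next in-flight message at
 * or after *id with an extra reference held.
 */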
static void *hmdfs_get_msg_next(struct hmdfs_peer *peer, int *id)
{
	struct hmdfs_msg_idr_head *head = NULL;

	spin_lock(&peer->idr_lock);
	head = idr_get_next(&peer->msg_idr, id);
	if (head && head->type < MSG_IDR_MAX && head->type >= 0)
		kref_get(&head->ref);

	spin_unlock(&peer->idr_lock);

	return head;
}

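/*
 * Offline event callback: wake up every in-flight message of the peer
 * with -ETIME so that waiters do not block on a dead connection.
 */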
void hmdfs_client_offline_notify(struct hmdfs_peer *conn, int evt,
				 unsigned int seq)
{
	int id;
	int count = 0;
	struct hmdfs_msg_idr_head *head = NULL;

	for (id = 0; (head = hmdfs_get_msg_next(conn, &id)) != NULL; ++id) {
		switch (head->type) {
		case MSG_IDR_1_0_NONE:
			head_put(head);
			head_put(head);
			break;
		case MSG_IDR_MESSAGE_SYNC:
		case MSG_IDR_1_0_MESSAGE_SYNC:
			hmdfs_response_wakeup((struct sendmsg_wait_queue *)head,
					      -ETIME, 0, NULL);
			hmdfs_debug("wakeup id=%d", head->msg_id);
			msg_put((struct sendmsg_wait_queue *)head);
			break;
		case MSG_IDR_MESSAGE_ASYNC:
			hmdfs_wakeup_parasite(
				(struct hmdfs_msg_parasite *)head);
			hmdfs_debug("wakeup parasite id=%d", head->msg_id);
			mp_put((struct hmdfs_msg_parasite *)head);
			break;
		case MSG_IDR_PAGE:
		case MSG_IDR_1_0_PAGE:
			hmdfs_wakeup_async_work(
				(struct hmdfs_async_work *)head);
			hmdfs_debug("wakeup async work id=%d", head->msg_id);
			asw_put((struct hmdfs_async_work *)head);
			break;
		default:
			hmdfs_err("Bad type=%d id=%d", head->type,
				  head->msg_id);
			break;
		}

		count++;
		/*
		 * If there are too many idr entries to process,
		 * reschedule every 512 messages to avoid a soft lockup.
		 */
		if (count % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}
}

static struct hmdfs_node_cb_desc client_cb[] = {
	{
		.evt = NODE_EVT_OFFLINE,
		.sync = true,
		.fn = hmdfs_client_offline_notify,
	},
};

void __init hmdfs_client_add_node_evt_cb(void)
{
	hmdfs_node_add_evt_cb(client_cb, ARRAY_SIZE(client_cb));
}