1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/hmdfs/comm/device_node.c
4 *
5 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
6 */
7
8 #include "device_node.h"
9
10 #include <linux/errno.h>
11 #include <linux/fs.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/kfifo.h>
15 #include <linux/module.h>
16 #include <linux/string.h>
17 #include <linux/sysfs.h>
18 #include <linux/types.h>
19 #include <linux/backing-dev.h>
20
21 #include "client_writeback.h"
22 #include "server_writeback.h"
23 #include "connection.h"
24 #include "hmdfs_client.h"
25 #include "socket_adapter.h"
26 #include "authority/authentication.h"
27
28 DEFINE_MUTEX(hmdfs_sysfs_mutex);
29 static struct kset *hmdfs_kset;
30
31 struct hmdfs_disconnect_node_work {
32 struct hmdfs_peer *conn;
33 struct work_struct work;
34 atomic_t *cnt;
35 struct wait_queue_head *waitq;
36 };
37
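/*
 * Handle CMD_UPDATE_SOCKET written to the 'cmd' sysfs node: look up (or
 * create) the peer matching the given cid, attach the new tcp fd to it,
 * and record the writer's credentials as the system cred on first use.
 */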
38 static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
39 struct hmdfs_sb_info *sbi)
40 {
41 struct update_socket_param cmd;
42 struct hmdfs_peer *node = NULL;
43 struct connection *conn = NULL;
44
45 if (unlikely(!buf || len != sizeof(cmd))) {
46 hmdfs_err("len/buf error");
47 goto out;
48 }
49 memcpy(&cmd, buf, sizeof(cmd));
50
51 node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
52 if (unlikely(!node)) {
53 hmdfs_err("failed to update ctrl node: cannot get peer");
54 goto out;
55 }
56
57 conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
58 if (unlikely(!conn)) {
59 hmdfs_err("failed to update ctrl node: cannot get conn");
60 } else if (!sbi->system_cred) {
61 const struct cred *system_cred = get_cred(current_cred());
62
63 if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
64 put_cred(system_cred);
65 else
66 hmdfs_check_cred(system_cred);
67 }
68 out:
69 if (conn)
70 connection_put(conn);
71 if (node)
72 peer_put(node);
73 }
74
75 static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
76 struct hmdfs_sb_info *sbi)
77 {
78 struct update_devsl_param cmd;
79 struct hmdfs_peer *node = NULL;
80
81 if (unlikely(!buf || len != sizeof(cmd))) {
82 hmdfs_err("Recved an invalid userbuf");
83 return;
84 }
85 memcpy(&cmd, buf, sizeof(cmd));
86
87 node = hmdfs_lookup_from_cid(sbi, cmd.cid);
88 if (unlikely(!node)) {
89 hmdfs_err("failed to update devsl: cannot get peer");
90 return;
91 }
92 hmdfs_info("Found peer: device_id = %llu", node->device_id);
93 node->devsl = cmd.devsl;
94 peer_put(node);
95 }
96
97 static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
98 {
99 hmdfs_start_process_offline(conn);
100 hmdfs_disconnect_node(conn);
101 hmdfs_stop_process_offline(conn);
102 }
103
104 static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
105 struct hmdfs_sb_info *sbi)
106 {
107 struct offline_param cmd;
108 struct hmdfs_peer *node = NULL;
109
110 if (unlikely(!buf || len != sizeof(cmd))) {
111 hmdfs_err("Recved an invalid userbuf");
112 return;
113 }
114 memcpy(&cmd, buf, sizeof(cmd));
115 node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
116 if (unlikely(!node)) {
117 hmdfs_err("Cannot find node by device");
118 return;
119 }
120 hmdfs_info("Found peer: device_id = %llu", node->device_id);
121 hmdfs_disconnect_node_marked(node);
122 peer_put(node);
123 }
124
125 static void hmdfs_disconnect_node_work_fn(struct work_struct *base)
126 {
127 struct hmdfs_disconnect_node_work *work =
128 container_of(base, struct hmdfs_disconnect_node_work, work);
129
130 hmdfs_disconnect_node_marked(work->conn);
131 if (atomic_dec_and_test(work->cnt))
132 wake_up(work->waitq);
133 kfree(work);
134 }
135
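/*
 * Handle CMD_OFF_LINE_ALL: disconnect every peer of this superblock.
 * Each disconnect is pushed to the system workqueue so peers are torn
 * down concurrently; if a work item cannot be allocated the peer is
 * disconnected synchronously instead. Waits until all queued work is done.
 */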
136 static void ctrl_cmd_off_line_all_handler(const char *buf, size_t len,
137 struct hmdfs_sb_info *sbi)
138 {
139 struct hmdfs_peer *node = NULL;
140 struct hmdfs_disconnect_node_work *work = NULL;
141 atomic_t cnt = ATOMIC_INIT(0);
142 wait_queue_head_t waitq;
143
144 if (unlikely(len != sizeof(struct offline_all_param))) {
145 hmdfs_err("Recved an invalid userbuf, len %zu, expect %zu\n",
146 len, sizeof(struct offline_all_param));
147 return;
148 }
149
150 init_waitqueue_head(&waitq);
151 mutex_lock(&sbi->connections.node_lock);
152 list_for_each_entry(node, &sbi->connections.node_list, list) {
153 mutex_unlock(&sbi->connections.node_lock);
154 work = kmalloc(sizeof(*work), GFP_KERNEL);
155 if (work) {
156 atomic_inc(&cnt);
157 work->conn = node;
158 work->cnt = &cnt;
159 work->waitq = &waitq;
160 INIT_WORK(&work->work, hmdfs_disconnect_node_work_fn);
161 schedule_work(&work->work);
162 } else {
163 hmdfs_disconnect_node_marked(node);
164 }
165 mutex_lock(&sbi->connections.node_lock);
166 }
167 mutex_unlock(&sbi->connections.node_lock);
168
169 wait_event(waitq, !atomic_read(&cnt));
170 }
171
172 typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
173 struct hmdfs_sb_info *sbi);
174
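/* Dispatch table for commands written to the 'cmd' sysfs attribute. */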
175 static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
176 [CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
177 [CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
178 [CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
179 [CMD_OFF_LINE_ALL] = ctrl_cmd_off_line_all_handler,
180 };
181
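/*
 * Reading 'cmd' pops one pending notify_param from the notify fifo and
 * copies it to userspace verbatim; NOTIFY_NONE is reported when empty.
 */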
182 static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
183 char *buf)
184 {
185 struct notify_param param;
186 int out_len;
187 struct hmdfs_sb_info *sbi = to_sbi(kobj);
188
189 memset(&param, 0, sizeof(param));
190 spin_lock(&sbi->notify_fifo_lock);
191 out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
192 spin_unlock(&sbi->notify_fifo_lock);
193 if (out_len != sizeof(param))
194 param.notify = NOTIFY_NONE;
195 memcpy(buf, &param, sizeof(param));
196 return sizeof(param);
197 }
198
199 static const char *cmd2str(int cmd)
200 {
201 switch (cmd) {
202 case 0:
203 return "CMD_UPDATE_SOCKET";
204 case 1:
205 return "CMD_UPDATE_DEVSL";
206 case 2:
207 return "CMD_OFF_LINE";
208 case 3:
209 return "CMD_OFF_LINE_ALL";
210 default:
211 return "illegal cmd";
212 }
213 }
214
215 static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
216 const char *buf, size_t len)
217 {
218 int cmd;
219 struct hmdfs_sb_info *sbi = to_sbi(kobj);
220
221 if (!sbi) {
222 hmdfs_info("Fatal! Empty sbi. Mount fs first");
223 return len;
224 }
225 if (len < sizeof(int)) {
226 hmdfs_err("Illegal cmd: cmd len = %zu", len);
227 return len;
228 }
229 cmd = *(int *)buf;
230 if (cmd < 0 || cmd >= CMD_CNT) {
231 hmdfs_err("Illegal cmd : cmd = %d", cmd);
232 return len;
233 }
234 hmdfs_info("Recved cmd: %s", cmd2str(cmd));
235 if (cmd_handler[cmd])
236 cmd_handler[cmd](buf, len, sbi);
237 return len;
238 }
239
240 static struct sbi_attribute sbi_cmd_attr =
241 __ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);
242
243 static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
244 char *buf)
245 {
246 ssize_t size = 0;
247 struct hmdfs_sb_info *sbi = NULL;
248 struct hmdfs_peer *peer = NULL;
249 struct connection *conn_impl = NULL;
250 struct tcp_handle *tcp = NULL;
251
252 sbi = to_sbi(kobj);
253 size += sprintf(buf + size, "peers version status\n");
254
255 mutex_lock(&sbi->connections.node_lock);
256 list_for_each_entry(peer, &sbi->connections.node_list, list) {
257 size += sprintf(buf + size, "%llu %d %d\n", peer->device_id,
258 peer->version, peer->status);
259 // connection information
260 size += sprintf(
261 buf + size,
262 "\t socket_fd connection_status tcp_status ... refcnt\n");
263 mutex_lock(&peer->conn_impl_list_lock);
264 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
265 tcp = conn_impl->connect_handle;
266 size += sprintf(buf + size, "\t %d \t%d \t%d \t%p \t%ld\n",
267 tcp->fd, conn_impl->status,
268 tcp->sock->state, tcp->sock, file_count(tcp->sock->file));
269 }
270 mutex_unlock(&peer->conn_impl_list_lock);
271 }
272 mutex_unlock(&sbi->connections.node_lock);
273 return size;
274 }
275
276 static ssize_t sbi_status_store(struct kobject *kobj,
277 struct sbi_attribute *attr, const char *buf,
278 size_t len)
279 {
280 return len;
281 }
282
283 static struct sbi_attribute sbi_status_attr =
284 __ATTR(status, 0664, sbi_status_show, sbi_status_store);
285
286 static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
287 char *buf)
288 {
289 ssize_t size = 0;
290 struct hmdfs_sb_info *sbi = NULL;
291 struct hmdfs_peer *peer = NULL;
292 struct connection *conn_impl = NULL;
293 struct tcp_handle *tcp = NULL;
294
295 sbi = to_sbi(kobj);
296 mutex_lock(&sbi->connections.node_lock);
297 list_for_each_entry(peer, &sbi->connections.node_list, list) {
298 // connection information
299 mutex_lock(&peer->conn_impl_list_lock);
300 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
301 tcp = conn_impl->connect_handle;
302 size += sprintf(buf + size, "socket_fd: %d\n", tcp->fd);
303 size += sprintf(buf + size,
304 "\tsend_msg %d \tsend_bytes %llu\n",
305 conn_impl->stat.send_message_count,
306 conn_impl->stat.send_bytes);
307 size += sprintf(buf + size,
308 "\trecv_msg %d \trecv_bytes %llu\n",
309 conn_impl->stat.recv_message_count,
310 conn_impl->stat.recv_bytes);
311 }
312 mutex_unlock(&peer->conn_impl_list_lock);
313 }
314 mutex_unlock(&sbi->connections.node_lock);
315 return size;
316 }
317
318 static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
319 const char *buf, size_t len)
320 {
321 struct hmdfs_sb_info *sbi = NULL;
322 struct hmdfs_peer *peer = NULL;
323 struct connection *conn_impl = NULL;
324
325 sbi = to_sbi(kobj);
326 mutex_lock(&sbi->connections.node_lock);
327 list_for_each_entry(peer, &sbi->connections.node_list, list) {
328 // connection information
329 mutex_lock(&peer->conn_impl_list_lock);
330 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
331 conn_impl->stat.send_message_count = 0;
332 conn_impl->stat.send_bytes = 0;
333 conn_impl->stat.recv_message_count = 0;
334 conn_impl->stat.recv_bytes = 0;
335 }
336 mutex_unlock(&peer->conn_impl_list_lock);
337 }
338 mutex_unlock(&sbi->connections.node_lock);
339 return len;
340 }
341
342 static struct sbi_attribute sbi_statistic_attr =
343 __ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);
344
345 static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
346 struct sbi_attribute *attr, char *buf)
347 {
348 return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
349 }
350
351 #define PRECISION_MAX 3600000
352
353 static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
354 struct sbi_attribute *attr,
355 const char *buf, size_t len)
356 {
357 int ret;
358 unsigned int precision;
359 struct hmdfs_sb_info *sbi = to_sbi(kobj);
360
361 ret = kstrtouint(skip_spaces(buf), 0, &precision);
362 if (!ret) {
363 if (precision <= PRECISION_MAX)
364 sbi->dcache_precision = precision;
365 else
366 ret = -EINVAL;
367 }
368
369 return ret ? ret : len;
370 }
371
372 static struct sbi_attribute sbi_dcache_precision_attr =
373 __ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
374 sbi_dcache_precision_store);
375
376 static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
377 struct sbi_attribute *attr, char *buf)
378 {
379 return snprintf(buf, PAGE_SIZE, "%lu\n",
380 to_sbi(kobj)->dcache_threshold);
381 }
382
383 static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
384 struct sbi_attribute *attr,
385 const char *buf, size_t len)
386 {
387 int ret;
388 unsigned long threshold;
389 struct hmdfs_sb_info *sbi = to_sbi(kobj);
390
391 ret = kstrtoul(skip_spaces(buf), 0, &threshold);
392 if (!ret)
393 sbi->dcache_threshold = threshold;
394
395 return ret ? ret : len;
396 }
397
398 static struct sbi_attribute sbi_dcache_threshold_attr =
399 __ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
400 sbi_dcache_threshold_store);
401
402 static ssize_t server_statistic_show(struct kobject *kobj,
403 struct sbi_attribute *attr, char *buf)
404 {
405 int i, ret;
406 const size_t size = PAGE_SIZE - 1;
407 ssize_t pos = 0;
408 struct server_statistic *stat = to_sbi(kobj)->s_server_statis;
409
410 for (i = 0; i < F_SIZE; i++) {
411
412 ret = snprintf(buf + pos, size - pos,
413 "%llu %u %llu %llu\n",
414 stat[i].cnt,
415 jiffies_to_msecs(stat[i].max),
416 stat[i].snd_cnt, stat[i].snd_fail_cnt);
417 if (ret > size - pos)
418 break;
419 pos += ret;
420 }
421
422 /* If we broke out of the loop early, append a newline */
423 if (i < F_SIZE) {
424 ret = snprintf(buf + pos, size + 1 - pos, "\n");
425 pos += ret;
426 }
427 return pos;
428 }
429
430 static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);
431
432 static ssize_t client_statistic_show(struct kobject *kobj,
433 struct sbi_attribute *attr, char *buf)
434 {
435 int i, ret;
436 const size_t size = PAGE_SIZE - 1;
437 ssize_t pos = 0;
438 struct client_statistic *stat = to_sbi(kobj)->s_client_statis;
439
440 for (i = 0; i < F_SIZE; i++) {
441
442 ret = snprintf(buf + pos, size - pos,
443 "%llu %llu %llu %llu %llu %u\n",
444 stat[i].snd_cnt,
445 stat[i].snd_fail_cnt,
446 stat[i].resp_cnt,
447 stat[i].timeout_cnt,
448 stat[i].delay_resp_cnt,
449 jiffies_to_msecs(stat[i].max));
450 if (ret > size - pos)
451 break;
452 pos += ret;
453 }
454
455 /* If we broke out of the loop early, append a newline */
456 if (i < F_SIZE) {
457 ret = snprintf(buf + pos, size + 1 - pos, "\n");
458 pos += ret;
459 }
460
461 return pos;
462 }
463
464 static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);
465
466 static inline unsigned long pages_to_kbytes(unsigned long page)
467 {
468 return page << (PAGE_SHIFT - 10);
469 }
470
471 static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
472 struct sbi_attribute *attr,
473 char *buf)
474 {
475 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
476 struct hmdfs_writeback *hwb = sbi->h_wb;
477 unsigned long avg;
478 unsigned long max;
479 unsigned long min;
480
481 spin_lock(&hwb->write_bandwidth_lock);
482 avg = hwb->avg_write_bandwidth;
483 max = hwb->max_write_bandwidth;
484 min = hwb->min_write_bandwidth;
485 spin_unlock(&hwb->write_bandwidth_lock);
486
487 if (min == ULONG_MAX)
488 min = 0;
489
490 return snprintf(buf, PAGE_SIZE,
491 "%10lu\n"
492 "%10lu\n"
493 "%10lu\n",
494 pages_to_kbytes(avg),
495 pages_to_kbytes(max),
496 pages_to_kbytes(min));
497 }
498
499 static struct sbi_attribute sbi_dirty_writeback_stats_attr =
500 __ATTR_RO(dirty_writeback_stats);
501
502 static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
503 struct sbi_attribute *attr,
504 char *buf)
505 {
506 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
507
508 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->wb_timeout_ms);
509 }
510
511 static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj,
512 struct sbi_attribute *attr,
513 const char *buf, size_t len)
514 {
515 struct hmdfs_sb_info *sbi = to_sbi(kobj);
516 unsigned int val;
517 int err;
518
519 err = kstrtouint(buf, 10, &val);
520 if (err)
521 return err;
522
523 if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS)
524 return -EINVAL;
525
526 sbi->wb_timeout_ms = val;
527
528 return len;
529 }
530
531 static struct sbi_attribute sbi_wb_timeout_ms_attr =
532 __ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show,
533 sbi_wb_timeout_ms_store);
534
535 static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj,
536 struct sbi_attribute *attr,
537 char *buf)
538 {
539 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
540
541 return snprintf(buf, PAGE_SIZE, "%u\n",
542 sbi->h_wb->dirty_writeback_interval);
543 }
544
545 static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj,
546 struct sbi_attribute *attr,
547 const char *buf, size_t len)
548 {
549 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
550 int err;
551
552 err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval);
553 if (err)
554 return err;
555 return len;
556 }
557
558 static struct sbi_attribute sbi_dirty_writeback_centisecs_attr =
559 __ATTR(dirty_writeback_centisecs, 0664,
560 sbi_dirty_writeback_centisecs_show,
561 sbi_dirty_writeback_centisecs_store);
562
563 static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj,
564 struct sbi_attribute *attr,
565 char *buf)
566 {
567 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
568
569 return snprintf(buf, PAGE_SIZE, "%lu\n",
570 sbi->h_wb->dirty_file_bg_bytes);
571 }
572
573 static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj,
574 struct sbi_attribute *attr,
575 const char *buf,
576 size_t len)
577 {
578 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
579 unsigned long file_background_bytes = 0;
580 int err;
581
582 err = kstrtoul(buf, 10, &file_background_bytes);
583 if (err)
584 return err;
585 if (file_background_bytes == 0)
586 return -EINVAL;
587
588 sbi->h_wb->dirty_fs_bytes =
589 max(sbi->h_wb->dirty_fs_bytes, file_background_bytes);
590 sbi->h_wb->dirty_fs_bg_bytes =
591 max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes);
592 sbi->h_wb->dirty_file_bytes =
593 max(sbi->h_wb->dirty_file_bytes, file_background_bytes);
594
595 sbi->h_wb->dirty_file_bg_bytes = file_background_bytes;
596 hmdfs_calculate_dirty_thresh(sbi->h_wb);
597 hmdfs_update_ratelimit(sbi->h_wb);
598 return len;
599 }
600
601 static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj,
602 struct sbi_attribute *attr,
603 char *buf)
604 {
605 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
606
607 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes);
608 }
609
610 static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj,
611 struct sbi_attribute *attr,
612 const char *buf, size_t len)
613 {
614 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
615 unsigned long fs_background_bytes = 0;
616 int err;
617
618 err = kstrtoul(buf, 10, &fs_background_bytes);
619 if (err)
620 return err;
621 if (fs_background_bytes == 0)
622 return -EINVAL;
623
624 sbi->h_wb->dirty_file_bg_bytes =
625 min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes);
626 sbi->h_wb->dirty_fs_bytes =
627 max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes);
628
629 sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes;
630 hmdfs_calculate_dirty_thresh(sbi->h_wb);
631 hmdfs_update_ratelimit(sbi->h_wb);
632 return len;
633 }
634
635 static struct sbi_attribute sbi_dirty_file_background_bytes_attr =
636 __ATTR(dirty_file_background_bytes, 0644,
637 sbi_dirty_file_background_bytes_show,
638 sbi_dirty_file_background_bytes_store);
639 static struct sbi_attribute sbi_dirty_fs_background_bytes_attr =
640 __ATTR(dirty_fs_background_bytes, 0644,
641 sbi_dirty_fs_background_bytes_show,
642 sbi_dirty_fs_background_bytes_store);
643
644 static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj,
645 struct sbi_attribute *attr, char *buf)
646 {
647 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
648
649 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes);
650 }
651
652 static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj,
653 struct sbi_attribute *attr,
654 const char *buf, size_t len)
655 {
656 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
657 unsigned long file_bytes = 0;
658 int err;
659
660 err = kstrtoul(buf, 10, &file_bytes);
661 if (err)
662 return err;
663 if (file_bytes == 0)
664 return -EINVAL;
665
666 sbi->h_wb->dirty_file_bg_bytes =
667 min(sbi->h_wb->dirty_file_bg_bytes, file_bytes);
668 sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes);
669
670 sbi->h_wb->dirty_file_bytes = file_bytes;
671 hmdfs_calculate_dirty_thresh(sbi->h_wb);
672 hmdfs_update_ratelimit(sbi->h_wb);
673 return len;
674 }
675
676 static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj,
677 struct sbi_attribute *attr, char *buf)
678 {
679 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
680
681 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes);
682 }
683
684 static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj,
685 struct sbi_attribute *attr,
686 const char *buf, size_t len)
687 {
688 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
689 unsigned long fs_bytes = 0;
690 int err;
691
692 err = kstrtoul(buf, 10, &fs_bytes);
693 if (err)
694 return err;
695 if (fs_bytes == 0)
696 return -EINVAL;
697
698 sbi->h_wb->dirty_file_bg_bytes =
699 min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes);
700 sbi->h_wb->dirty_file_bytes =
701 min(sbi->h_wb->dirty_file_bytes, fs_bytes);
702 sbi->h_wb->dirty_fs_bg_bytes =
703 min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes);
704
705 sbi->h_wb->dirty_fs_bytes = fs_bytes;
706 hmdfs_calculate_dirty_thresh(sbi->h_wb);
707 hmdfs_update_ratelimit(sbi->h_wb);
708 return len;
709 }
710
711 static struct sbi_attribute sbi_dirty_file_bytes_attr =
712 __ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show,
713 sbi_dirty_file_bytes_store);
714 static struct sbi_attribute sbi_dirty_fs_bytes_attr =
715 __ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show,
716 sbi_dirty_fs_bytes_store);
717
718 static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj,
719 struct sbi_attribute *attr,
720 char *buf)
721 {
722 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
723
724 return snprintf(buf, PAGE_SIZE, "%u\n",
725 sbi->h_wb->writeback_timelimit / HZ);
726 }
727
728 static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj,
729 struct sbi_attribute *attr,
730 const char *buf,
731 size_t len)
732 {
733 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
734 unsigned int time_limit = 0;
735 int err;
736
737 err = kstrtouint(buf, 10, &time_limit);
738 if (err)
739 return err;
740 if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ))
741 return -EINVAL;
742
743 sbi->h_wb->writeback_timelimit = time_limit * HZ;
744 return len;
745 }
746
747 static struct sbi_attribute sbi_dirty_writeback_timelimit_attr =
748 __ATTR(dirty_writeback_timelimit, 0644, sbi_dirty_writeback_timelimit_show,
749 sbi_dirty_writeback_timelimit_store);
750
751 static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj,
752 struct sbi_attribute *attr,
753 char *buf)
754 {
755 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
756
757 return snprintf(buf, PAGE_SIZE, "%lu\n",
758 sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT);
759 }
760
761 static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj,
762 struct sbi_attribute *attr,
763 const char *buf,
764 size_t len)
765 {
766 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
767 unsigned long bw_thresh_lowerbytes = 0;
768 unsigned long bw_thresh_lowerlimit;
769 int err;
770
771 err = kstrtoul(buf, 10, &bw_thresh_lowerbytes);
772 if (err)
773 return err;
774
775 bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE);
776 if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT ||
777 bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT)
778 return -EINVAL;
779
780 sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit;
781 return len;
782 }
783
784 static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr =
785 __ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show,
786 sbi_dirty_thresh_lowerlimit_store);
787
788 static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj,
789 struct sbi_attribute *attr,
790 char *buf)
791 {
792 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
793
794 return snprintf(buf, PAGE_SIZE, "%d\n",
795 sbi->h_wb->dirty_auto_threshold);
796 }
797
798 static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj,
799 struct sbi_attribute *attr,
800 const char *buf,
801 size_t len)
802 {
803 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
804 bool dirty_auto_threshold = false;
805 int err;
806
807 err = kstrtobool(buf, &dirty_auto_threshold);
808 if (err)
809 return err;
810
811 sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold;
812 return len;
813 }
814
815 static struct sbi_attribute sbi_dirty_writeback_autothresh_attr =
816 __ATTR(dirty_writeback_autothresh, 0644, sbi_dirty_writeback_autothresh_show,
817 sbi_dirty_writeback_autothresh_store);
818
819 static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj,
820 struct sbi_attribute *attr,
821 char *buf)
822 {
823 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
824
825 return snprintf(buf, PAGE_SIZE, "%d\n",
826 sbi->h_wb->dirty_writeback_control);
827 }
828
829 static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj,
830 struct sbi_attribute *attr,
831 const char *buf, size_t len)
832 {
833 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
834 unsigned int dirty_writeback_control = 0;
835 int err;
836
837 err = kstrtouint(buf, 10, &dirty_writeback_control);
838 if (err)
839 return err;
840
841 sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control;
842 return len;
843 }
844
845 static struct sbi_attribute sbi_dirty_writeback_control_attr =
846 __ATTR(dirty_writeback_control, 0644, sbi_dirty_writeback_control_show,
847 sbi_dirty_writeback_control_store);
848
849 static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj,
850 struct sbi_attribute *attr,
851 char *buf)
852 {
853 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
854
855 return snprintf(buf, PAGE_SIZE, "%d\n",
856 sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT);
857 }
858
859 static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj,
860 struct sbi_attribute *attr,
861 const char *buf,
862 size_t len)
863 {
864 struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
865 int dirty_thresh_mb;
866 unsigned long long pages;
867 int err;
868
869 err = kstrtoint(buf, 10, &dirty_thresh_mb);
870 if (err)
871 return err;
872
873 if (dirty_thresh_mb <= 0)
874 return -EINVAL;
875
876 pages = dirty_thresh_mb;
877 pages <<= HMDFS_MB_TO_PAGE_SHIFT;
878 if (pages > INT_MAX) {
879 hmdfs_err("Illegal dirty_thresh_mb %d, its page count exceeds INT_MAX",
880 dirty_thresh_mb);
881 return -EINVAL;
882 }
883
884 hswb->dirty_thresh_pg = (unsigned int)pages;
885 return len;
886 }
887
888 static struct sbi_attribute sbi_srv_dirty_thresh_attr =
889 __ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show,
890 sbi_srv_dirty_thresh_store);
891
892
893 static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj,
894 struct sbi_attribute *attr,
895 char *buf)
896 {
897 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
898
899 return snprintf(buf, PAGE_SIZE, "%d\n",
900 sbi->h_swb->dirty_writeback_control);
901 }
902
903 static ssize_t sbi_srv_dirty_wb_control_store(struct kobject *kobj,
904 struct sbi_attribute *attr,
905 const char *buf,
906 size_t len)
907 {
908 struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
909 bool dirty_writeback_control = true;
910 int err;
911
912 err = kstrtobool(buf, &dirty_writeback_control);
913 if (err)
914 return err;
915
916 hswb->dirty_writeback_control = dirty_writeback_control;
917
918 return len;
919 }
920
921 static struct sbi_attribute sbi_srv_dirty_wb_control_attr =
922 __ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show,
923 sbi_srv_dirty_wb_control_store);
924
925 static ssize_t sbi_dcache_timeout_show(struct kobject *kobj,
926 struct sbi_attribute *attr, char *buf)
927 {
928 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
929
930 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout);
931 }
932
933 static ssize_t sbi_dcache_timeout_store(struct kobject *kobj,
934 struct sbi_attribute *attr,
935 const char *buf, size_t len)
936 {
937 struct hmdfs_sb_info *sbi = to_sbi(kobj);
938 unsigned int timeout;
939 int err;
940
941 err = kstrtouint(buf, 0, &timeout);
942 if (err)
943 return err;
944
945 /* zero is invalid, and it doesn't mean no cache */
946 if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT)
947 return -EINVAL;
948
949 sbi->dcache_timeout = timeout;
950
951 return len;
952 }
953
954 static struct sbi_attribute sbi_dcache_timeout_attr =
955 __ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show,
956 sbi_dcache_timeout_store);
957
958 static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj,
959 struct sbi_attribute *attr, char *buf)
960 {
961 return snprintf(buf, PAGE_SIZE, "%u\n",
962 to_sbi(kobj)->write_cache_timeout);
963 }
964
965 static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj,
966 struct sbi_attribute *attr, const char *buf, size_t len)
967 {
968 int ret;
969 unsigned int timeout;
970 struct hmdfs_sb_info *sbi = to_sbi(kobj);
971
972 ret = kstrtouint(buf, 0, &timeout);
973 if (ret)
974 return ret;
975
976 /* setting write_cache_timeout to 0 disables this functionality */
977 sbi->write_cache_timeout = timeout;
978
979 return len;
980 }
981
982 static struct sbi_attribute sbi_write_cache_timeout_sec_attr =
983 __ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show,
984 sbi_write_cache_timeout_sec_store);
985
986 static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj,
987 struct sbi_attribute *attr,
988 char *buf)
989 {
990 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
991
992 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay);
993 }
994
995 static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj,
996 struct sbi_attribute *attr,
997 const char *buf,
998 size_t len)
999 {
1000 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1001 unsigned int delay = 0;
1002 int err;
1003
1004 err = kstrtouint(buf, 10, &delay);
1005 if (err)
1006 return err;
1007
1008 sbi->async_cb_delay = delay;
1009
1010 return len;
1011 }
1012
1013 static struct sbi_attribute sbi_node_evt_cb_delay_attr =
1014 __ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show,
1015 sbi_node_evt_cb_delay_store);
1016
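/*
 * Count the entries in an IDR, yielding the CPU every
 * HMDFS_IDR_RESCHED_COUNT entries to avoid overly long walks.
 */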
1017 static int calc_idr_number(struct idr *idr)
1018 {
1019 void *entry = NULL;
1020 int id;
1021 int number = 0;
1022
1023 idr_for_each_entry(idr, entry, id) {
1024 number++;
1025 if (number % HMDFS_IDR_RESCHED_COUNT == 0)
1026 cond_resched();
1027 }
1028
1029 return number;
1030 }
1031
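/*
 * Print per-peer IDR usage: the pending-message IDR when showmsg is
 * true, otherwise the IDR of files opened on behalf of the peer.
 */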
1032 static ssize_t sbi_show_idr_stats(struct kobject *kobj,
1033 struct sbi_attribute *attr,
1034 char *buf, bool showmsg)
1035 {
1036 ssize_t size = 0;
1037 int count;
1038 struct hmdfs_sb_info *sbi = NULL;
1039 struct hmdfs_peer *peer = NULL;
1040 struct idr *idr = NULL;
1041
1042 sbi = to_sbi(kobj);
1043
1044 mutex_lock(&sbi->connections.node_lock);
1045 list_for_each_entry(peer, &sbi->connections.node_list, list) {
1046 idr = showmsg ? &peer->msg_idr : &peer->file_id_idr;
1047 count = calc_idr_number(idr);
1048 size += snprintf(buf + size, PAGE_SIZE - size,
1049 "device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n",
1050 peer->device_id, count, idr_get_cursor(idr));
1051 if (size >= PAGE_SIZE) {
1052 size = PAGE_SIZE;
1053 break;
1054 }
1055 }
1056 mutex_unlock(&sbi->connections.node_lock);
1057
1058 return size;
1059 }
1060
1061 static ssize_t pending_message_show(struct kobject *kobj,
1062 struct sbi_attribute *attr,
1063 char *buf)
1064 {
1065 return sbi_show_idr_stats(kobj, attr, buf, true);
1066 }
1067
1068 static struct sbi_attribute sbi_pending_message_attr =
1069 __ATTR_RO(pending_message);
1070
1071 static ssize_t peer_opened_fd_show(struct kobject *kobj,
1072 struct sbi_attribute *attr, char *buf)
1073 {
1074 return sbi_show_idr_stats(kobj, attr, buf, false);
1075 }
1076
1077 static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd);
1078
1079 static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj,
1080 struct sbi_attribute *attr,
1081 char *buf)
1082 {
1083 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
1084
1085 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active);
1086 }
1087
1088 static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj,
1089 struct sbi_attribute *attr, const char *buf, size_t len)
1090 {
1091 int ret;
1092 unsigned int max_active;
1093 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1094
1095 ret = kstrtouint(buf, 0, &max_active);
1096 if (ret)
1097 return ret;
1098
1099 sbi->async_req_max_active = max_active;
1100
1101 return len;
1102 }
1103
1104 static struct sbi_attribute sbi_srv_req_max_active_attr =
1105 __ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show,
1106 sbi_srv_req_max_active_attr_store);
1107
1108
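/* List the cache file nodes on @head, one "dev_id ... relative_path ..." line each. */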
1109 static ssize_t cache_file_show(struct hmdfs_sb_info *sbi,
1110 struct list_head *head, char *buf)
1111 {
1112 struct cache_file_node *cfn = NULL;
1113 ssize_t pos = 0;
1114
1115 mutex_lock(&sbi->cache_list_lock);
1116 list_for_each_entry(cfn, head, list) {
1117 pos += snprintf(buf + pos, PAGE_SIZE - pos,
1118 "dev_id: %s relative_path: %s\n",
1119 cfn->cid, cfn->relative_path);
1120 if (pos >= PAGE_SIZE) {
1121 pos = PAGE_SIZE;
1122 break;
1123 }
1124 }
1125 mutex_unlock(&sbi->cache_list_lock);
1126
1127 return pos;
1128 }
1129
1130 static ssize_t client_cache_file_show(struct kobject *kobj,
1131 struct sbi_attribute *attr, char *buf)
1132 {
1133 return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf);
1134 }
1135 static ssize_t server_cache_file_show(struct kobject *kobj,
1136 struct sbi_attribute *attr, char *buf)
1137 {
1138 return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf);
1139 }
1140
1141 static struct sbi_attribute sbi_server_cache_file_attr =
1142 __ATTR_RO(server_cache_file);
1143 static struct sbi_attribute sbi_client_cache_file_attr =
1144 __ATTR_RO(client_cache_file);
1145
1146 static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr,
1147 char *buf)
1148 {
1149 return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->seq);
1150 }
1151
1152 static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq);
1153
1154 static ssize_t peers_sum_attr_show(struct kobject *kobj,
1155 struct sbi_attribute *attr, char *buf)
1156 {
1157 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1158 struct hmdfs_peer *node = NULL;
1159 unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0,
1160 restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0, rebuild_invalid = 0,
1161 rebuild_time = 0;
1162 unsigned long long stash_ok_pages = 0, stash_fail_pages = 0,
1163 restore_ok_pages = 0, restore_fail_pages = 0;
1164
1165 mutex_lock(&sbi->connections.node_lock);
1166 list_for_each_entry(node, &sbi->connections.node_list, list) {
1167 peer_get(node);
1168 mutex_unlock(&sbi->connections.node_lock);
1169 stash_ok += node->stats.stash.total_ok;
1170 stash_fail += node->stats.stash.total_fail;
1171 stash_ok_pages += node->stats.stash.ok_pages;
1172 stash_fail_pages += node->stats.stash.fail_pages;
1173 restore_ok += node->stats.restore.total_ok;
1174 restore_fail += node->stats.restore.total_fail;
1175 restore_ok_pages += node->stats.restore.ok_pages;
1176 restore_fail_pages += node->stats.restore.fail_pages;
1177 rebuild_ok += node->stats.rebuild.total_ok;
1178 rebuild_fail += node->stats.rebuild.total_fail;
1179 rebuild_invalid += node->stats.rebuild.total_invalid;
1180 rebuild_time += node->stats.rebuild.time;
1181 peer_put(node);
1182 mutex_lock(&sbi->connections.node_lock);
1183 }
1184 mutex_unlock(&sbi->connections.node_lock);
1185
1186 return snprintf(buf, PAGE_SIZE,
1187 "%u %u %llu %llu\n"
1188 "%u %u %llu %llu\n"
1189 "%u %u %u %u\n",
1190 stash_ok, stash_fail, stash_ok_pages, stash_fail_pages,
1191 restore_ok, restore_fail, restore_ok_pages,
1192 restore_fail_pages, rebuild_ok, rebuild_fail,
1193 rebuild_invalid, rebuild_time);
1194 }
1195
1196 static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr);
1197
1198 const char * const flag_name[] = {
1199 "READPAGES",
1200 "READPAGES_OPEN",
1201 "ATOMIC_OPEN",
1202 };
1203
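/*
 * Render a feature bitmask as a '|'-separated list, using names from
 * flag_name[] where available and the raw bit index otherwise.
 */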
1204 static ssize_t fill_features(char *buf, unsigned long long flag)
1205 {
1206 int i;
1207 ssize_t pos = 0;
1208 bool sep = false;
1209 int flag_name_count = ARRAY_SIZE(flag_name);
1210
1211 for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) {
1212 if (!(flag & BIT(i)))
1213 continue;
1214
1215 if (sep)
1216 pos += snprintf(buf + pos, PAGE_SIZE - pos, "|");
1217 sep = true;
1218
1219 if (pos >= PAGE_SIZE) {
1220 pos = PAGE_SIZE;
1221 break;
1222 }
1223
1224 if (i < flag_name_count && flag_name[i])
1225 pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s",
1226 flag_name[i]);
1227 else
1228 pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i);
1229
1230 if (pos >= PAGE_SIZE) {
1231 pos = PAGE_SIZE;
1232 break;
1233 }
1234 }
1235 pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1236 if (pos >= PAGE_SIZE)
1237 pos = PAGE_SIZE;
1238
1239 return pos;
1240 }
1241
1242 static ssize_t sbi_features_show(struct kobject *kobj,
1243 struct sbi_attribute *attr, char *buf)
1244 {
1245 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1246
1247 return fill_features(buf, sbi->s_features);
1248 }
1249
1250 static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444,
1251 sbi_features_show, NULL);
1252
1253 static struct attribute *sbi_attrs[] = {
1254 &sbi_cmd_attr.attr,
1255 &sbi_status_attr.attr,
1256 &sbi_statistic_attr.attr,
1257 &sbi_dcache_precision_attr.attr,
1258 &sbi_dcache_threshold_attr.attr,
1259 &sbi_dcache_timeout_attr.attr,
1260 &sbi_write_cache_timeout_sec_attr.attr,
1261 &sbi_local_op_attr.attr,
1262 &sbi_delay_resp_attr.attr,
1263 &sbi_wb_timeout_ms_attr.attr,
1264 &sbi_dirty_writeback_centisecs_attr.attr,
1265 &sbi_dirty_file_background_bytes_attr.attr,
1266 &sbi_dirty_fs_background_bytes_attr.attr,
1267 &sbi_dirty_file_bytes_attr.attr,
1268 &sbi_dirty_fs_bytes_attr.attr,
1269 &sbi_dirty_writeback_autothresh_attr.attr,
1270 &sbi_dirty_writeback_timelimit_attr.attr,
1271 &sbi_dirty_thresh_lowerlimit_attr.attr,
1272 &sbi_dirty_writeback_control_attr.attr,
1273 &sbi_dirty_writeback_stats_attr.attr,
1274 &sbi_srv_dirty_thresh_attr.attr,
1275 &sbi_srv_dirty_wb_control_attr.attr,
1276 &sbi_node_evt_cb_delay_attr.attr,
1277 &sbi_srv_req_max_active_attr.attr,
1278 &sbi_pending_message_attr.attr,
1279 &sbi_peer_opened_fd_attr.attr,
1280 &sbi_server_cache_file_attr.attr,
1281 &sbi_client_cache_file_attr.attr,
1282 &sbi_seq_attr.attr,
1283 &sbi_peers_attr.attr,
1284 &sbi_features_attr.attr,
1285 NULL,
1286 };
1287
1288 static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr,
1289 char *buf)
1290 {
1291 struct sbi_attribute *sbi_attr = to_sbi_attr(attr);
1292
1293 if (!sbi_attr->show)
1294 return -EIO;
1295 return sbi_attr->show(kobj, sbi_attr, buf);
1296 }
1297
1298 static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr,
1299 const char *buf, size_t len)
1300 {
1301 struct sbi_attribute *sbi_attr = to_sbi_attr(attr);
1302
1303 if (!sbi_attr->store)
1304 return -EIO;
1305 return sbi_attr->store(kobj, sbi_attr, buf, len);
1306 }
1307
1308 static const struct sysfs_ops sbi_sysfs_ops = {
1309 .show = sbi_attr_show,
1310 .store = sbi_attr_store,
1311 };
1312
1313 static void sbi_release(struct kobject *kobj)
1314 {
1315 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1316
1317 complete(&sbi->s_kobj_unregister);
1318 }
1319
1320 static struct kobj_type sbi_ktype = {
1321 .sysfs_ops = &sbi_sysfs_ops,
1322 .default_attrs = sbi_attrs,
1323 .release = sbi_release,
1324 };
1325
1326 static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x)
1327 {
1328 return container_of(x, struct sbi_cmd_attribute, attr);
1329 }
1330
1331 static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x)
1332 {
1333 return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj);
1334 }
1335
1336 static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr,
1337 char *buf)
1338 {
1339 int cmd = to_sbi_cmd_attr(attr)->command;
1340 struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);
1341
1342 if (cmd < 0 || cmd >= F_SIZE)
1343 return 0;
1344
1345 return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd));
1346 }
1347
1348 static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr,
1349 const char *buf, size_t len)
1350 {
1351 unsigned int value;
1352 int cmd = to_sbi_cmd_attr(attr)->command;
1353 int ret = kstrtouint(skip_spaces(buf), 0, &value);
1354 struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);
1355
1356 if (cmd < 0 || cmd >= F_SIZE)
1357 return -EINVAL;
1358
1359 if (!ret)
1360 set_cmd_timeout(sbi, cmd, value);
1361
1362 return ret ? ret : len;
1363 }
1364
1365 #define HMDFS_CMD_ATTR(_name, _cmd) \
1366 static struct sbi_cmd_attribute hmdfs_attr_##_name = { \
1367 .attr = { .name = __stringify(_name), .mode = 0664 }, \
1368 .command = (_cmd), \
1369 }
1370
1371 HMDFS_CMD_ATTR(open, F_OPEN);
1372 HMDFS_CMD_ATTR(release, F_RELEASE);
1373 HMDFS_CMD_ATTR(readpage, F_READPAGE);
1374 HMDFS_CMD_ATTR(writepage, F_WRITEPAGE);
1375 HMDFS_CMD_ATTR(iterate, F_ITERATE);
1376 HMDFS_CMD_ATTR(rmdir, F_RMDIR);
1377 HMDFS_CMD_ATTR(unlink, F_UNLINK);
1378 HMDFS_CMD_ATTR(rename, F_RENAME);
1379 HMDFS_CMD_ATTR(setattr, F_SETATTR);
1380 HMDFS_CMD_ATTR(statfs, F_STATFS);
1381 HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH);
1382 HMDFS_CMD_ATTR(getattr, F_GETATTR);
1383 HMDFS_CMD_ATTR(fsync, F_FSYNC);
1384 HMDFS_CMD_ATTR(syncfs, F_SYNCFS);
1385 HMDFS_CMD_ATTR(getxattr, F_GETXATTR);
1386 HMDFS_CMD_ATTR(setxattr, F_SETXATTR);
1387 HMDFS_CMD_ATTR(listxattr, F_LISTXATTR);
1388
1389 #define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr)
1390
1391 static struct attribute *sbi_timeout_attrs[] = {
1392 ATTR_LIST(open), ATTR_LIST(release),
1393 ATTR_LIST(readpage), ATTR_LIST(writepage),
1394 ATTR_LIST(iterate), ATTR_LIST(rmdir),
1395 ATTR_LIST(unlink), ATTR_LIST(rename),
1396 ATTR_LIST(setattr),
1397 ATTR_LIST(statfs), ATTR_LIST(drop_push),
1398 ATTR_LIST(getattr), ATTR_LIST(fsync),
1399 ATTR_LIST(syncfs), ATTR_LIST(getxattr),
1400 ATTR_LIST(setxattr), ATTR_LIST(listxattr),
1401 NULL
1402 };
1403
1404 static const struct sysfs_ops sbi_cmd_sysfs_ops = {
1405 .show = cmd_timeout_show,
1406 .store = cmd_timeout_store,
1407 };
1408
1409 static void sbi_timeout_release(struct kobject *kobj)
1410 {
1411 struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info,
1412 s_cmd_timeout_kobj);
1413
1414 complete(&sbi->s_timeout_kobj_unregister);
1415 }
1416
1417 static struct kobj_type sbi_timeout_ktype = {
1418 .sysfs_ops = &sbi_cmd_sysfs_ops,
1419 .default_attrs = sbi_timeout_attrs,
1420 .release = sbi_timeout_release,
1421 };
1422
1423 void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi)
1424 {
1425 kobject_put(&sbi->s_cmd_timeout_kobj);
1426 wait_for_completion(&sbi->s_timeout_kobj_unregister);
1427 kobject_put(&sbi->kobj);
1428 wait_for_completion(&sbi->s_kobj_unregister);
1429 }
1430
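/*
 * Create the per-superblock kobject under the "hmdfs" kset (named after
 * the mount) together with its "cmd_timeout" child used for per-command
 * timeout tuning. Fails with -EEXIST if an entry of that name already exists.
 */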
1431 int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi)
1432 {
1433 int ret;
1434 struct kobject *kobj = NULL;
1435
1436 mutex_lock(&hmdfs_sysfs_mutex);
1437 kobj = kset_find_obj(hmdfs_kset, name);
1438 if (kobj) {
1439 hmdfs_err("mount failed, already exist");
1440 kobject_put(kobj);
1441 mutex_unlock(&hmdfs_sysfs_mutex);
1442 return -EEXIST;
1443 }
1444
1445 sbi->kobj.kset = hmdfs_kset;
1446 init_completion(&sbi->s_kobj_unregister);
1447 ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype,
1448 &hmdfs_kset->kobj, "%s", name);
1449 sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000), KGIDT_INIT(1000));
1450 mutex_unlock(&hmdfs_sysfs_mutex);
1451
1452 if (ret) {
1453 kobject_put(&sbi->kobj);
1454 wait_for_completion(&sbi->s_kobj_unregister);
1455 return ret;
1456 }
1457
1458 init_completion(&sbi->s_timeout_kobj_unregister);
1459 ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype,
1460 &sbi->kobj, "cmd_timeout");
1461 if (ret) {
1462 hmdfs_release_sysfs(sbi);
1463 return ret;
1464 }
1465
1466 kobject_uevent(&sbi->kobj, KOBJ_ADD);
1467 return 0;
1468 }
1469
1470 void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi)
1471 {
1472 kobject_del(&sbi->s_cmd_timeout_kobj);
1473 kobject_del(&sbi->kobj);
1474 }
1475
1476 static inline int to_sysfs_fmt_evt(unsigned int evt)
1477 {
1478 return evt == RAW_NODE_EVT_NR ? -1 : evt;
1479 }
1480
1481 static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr,
1482 char *buf)
1483 {
1484 struct hmdfs_peer *peer = to_peer(kobj);
1485
1486 return fill_features(buf, peer->features);
1487 }
1488
1489 static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr,
1490 char *buf)
1491 {
1492 struct hmdfs_peer *peer = to_peer(kobj);
1493
1494 return snprintf(buf, PAGE_SIZE,
1495 "cur_async evt %d seq %u\n"
1496 "cur_sync evt %d seq %u\n"
1497 "pending evt %d seq %u\n"
1498 "merged evt %u\n"
1499 "dup_drop evt %u %u\n"
1500 "waiting evt %u %u\n"
1501 "seq_tbl %u %u %u %u\n"
1502 "seq_rd_idx %u\n"
1503 "seq_wr_idx %u\n",
1504 to_sysfs_fmt_evt(peer->cur_evt[0]),
1505 peer->cur_evt_seq[0],
1506 to_sysfs_fmt_evt(peer->cur_evt[1]),
1507 peer->cur_evt_seq[1],
1508 to_sysfs_fmt_evt(peer->pending_evt),
1509 peer->pending_evt_seq,
1510 peer->merged_evt,
1511 peer->dup_evt[RAW_NODE_EVT_OFF],
1512 peer->dup_evt[RAW_NODE_EVT_ON],
1513 peer->waiting_evt[RAW_NODE_EVT_OFF],
1514 peer->waiting_evt[RAW_NODE_EVT_ON],
1515 peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2],
1516 peer->seq_tbl[3],
1517 peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR,
1518 peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR);
1519 }
1520
1521 static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr,
1522 char *buf)
1523 {
1524 struct hmdfs_peer *peer = to_peer(kobj);
1525
1526 return snprintf(buf, PAGE_SIZE,
1527 "cur_ok %u\n"
1528 "cur_nothing %u\n"
1529 "cur_fail %u\n"
1530 "total_ok %u\n"
1531 "total_nothing %u\n"
1532 "total_fail %u\n"
1533 "ok_pages %llu\n"
1534 "fail_pages %llu\n",
1535 peer->stats.stash.cur_ok,
1536 peer->stats.stash.cur_nothing,
1537 peer->stats.stash.cur_fail,
1538 peer->stats.stash.total_ok,
1539 peer->stats.stash.total_nothing,
1540 peer->stats.stash.total_fail,
1541 peer->stats.stash.ok_pages,
1542 peer->stats.stash.fail_pages);
1543 }
1544
1545 static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr,
1546 char *buf)
1547 {
1548 struct hmdfs_peer *peer = to_peer(kobj);
1549
1550 return snprintf(buf, PAGE_SIZE,
1551 "cur_ok %u\n"
1552 "cur_fail %u\n"
1553 "cur_keep %u\n"
1554 "total_ok %u\n"
1555 "total_fail %u\n"
1556 "total_keep %u\n"
1557 "ok_pages %llu\n"
1558 "fail_pages %llu\n",
1559 peer->stats.restore.cur_ok,
1560 peer->stats.restore.cur_fail,
1561 peer->stats.restore.cur_keep,
1562 peer->stats.restore.total_ok,
1563 peer->stats.restore.total_fail,
1564 peer->stats.restore.total_keep,
1565 peer->stats.restore.ok_pages,
1566 peer->stats.restore.fail_pages);
1567 }
1568
1569 static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr,
1570 char *buf)
1571 {
1572 struct hmdfs_peer *peer = to_peer(kobj);
1573
1574 return snprintf(buf, PAGE_SIZE,
1575 "cur_ok %u\n"
1576 "cur_fail %u\n"
1577 "cur_invalid %u\n"
1578 "total_ok %u\n"
1579 "total_fail %u\n"
1580 "total_invalid %u\n"
1581 "time %u\n",
1582 peer->stats.rebuild.cur_ok,
1583 peer->stats.rebuild.cur_fail,
1584 peer->stats.rebuild.cur_invalid,
1585 peer->stats.rebuild.total_ok,
1586 peer->stats.rebuild.total_fail,
1587 peer->stats.rebuild.total_invalid,
1588 peer->stats.rebuild.time);
1589 }
1590
1591 static struct peer_attribute peer_features_attr = __ATTR_RO(features);
1592 static struct peer_attribute peer_event_attr = __ATTR_RO(event);
1593 static struct peer_attribute peer_stash_attr = __ATTR_RO(stash);
1594 static struct peer_attribute peer_restore_attr = __ATTR_RO(restore);
1595 static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild);
1596
1597 static struct attribute *peer_attrs[] = {
1598 &peer_features_attr.attr,
1599 &peer_event_attr.attr,
1600 &peer_stash_attr.attr,
1601 &peer_restore_attr.attr,
1602 &peer_rebuild_attr.attr,
1603 NULL,
1604 };
1605
1606 static ssize_t peer_attr_show(struct kobject *kobj, struct attribute *attr,
1607 char *buf)
1608 {
1609 struct peer_attribute *peer_attr = to_peer_attr(attr);
1610
1611 if (!peer_attr->show)
1612 return -EIO;
1613 return peer_attr->show(kobj, peer_attr, buf);
1614 }
1615
1616 static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr,
1617 const char *buf, size_t len)
1618 {
1619 struct peer_attribute *peer_attr = to_peer_attr(attr);
1620
1621 if (!peer_attr->store)
1622 return -EIO;
1623 return peer_attr->store(kobj, peer_attr, buf, len);
1624 }
1625
1626 static const struct sysfs_ops peer_sysfs_ops = {
1627 .show = peer_attr_show,
1628 .store = peer_attr_store,
1629 };
1630
1631 static void peer_sysfs_release(struct kobject *kobj)
1632 {
1633 struct hmdfs_peer *peer = to_peer(kobj);
1634
1635 complete(&peer->kobj_unregister);
1636 }
1637
1638 static struct kobj_type peer_ktype = {
1639 .sysfs_ops = &peer_sysfs_ops,
1640 .default_attrs = peer_attrs,
1641 .release = peer_sysfs_release,
1642 };
1643
1644 int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi,
1645 struct hmdfs_peer *peer)
1646 {
1647 int err = 0;
1648
1649 init_completion(&peer->kobj_unregister);
1650 err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj,
1651 "peer_%llu", peer->device_id);
1652 return err;
1653 }
1654
1655 void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer)
1656 {
1657 kobject_del(&peer->kobj);
1658 kobject_put(&peer->kobj);
1659 wait_for_completion(&peer->kobj_unregister);
1660 }
1661
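/*
 * Queue a notify_param for userspace and signal readers polling the
 * 'cmd' sysfs attribute via sysfs_notify().
 */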
1662 void notify(struct hmdfs_peer *node, struct notify_param *param)
1663 {
1664 struct hmdfs_sb_info *sbi = node->sbi;
1665 int in_len;
1666
1667 if (!param)
1668 return;
1669 spin_lock(&sbi->notify_fifo_lock);
1670 in_len =
1671 kfifo_in(&sbi->notify_fifo, param, sizeof(struct notify_param));
1672 spin_unlock(&sbi->notify_fifo_lock);
1673 if (in_len != sizeof(struct notify_param))
1674 return;
1675 sysfs_notify(&sbi->kobj, NULL, "cmd");
1676 }
1677
1678 int hmdfs_sysfs_init(void)
1679 {
1680 hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj);
1681 if (!hmdfs_kset)
1682 return -ENOMEM;
1683
1684 return 0;
1685 }
1686
1687 void hmdfs_sysfs_exit(void)
1688 {
1689 kset_unregister(hmdfs_kset);
1690 hmdfs_kset = NULL;
1691 }
1692