1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/hmdfs/comm/device_node.c
4 *
5 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
6 */
7
8 #include "device_node.h"
9
10 #include <linux/errno.h>
11 #include <linux/fs.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/kfifo.h>
15 #include <linux/module.h>
16 #include <linux/string.h>
17 #include <linux/sysfs.h>
18 #include <linux/types.h>
19 #include <linux/backing-dev.h>
20
21 #include "client_writeback.h"
22 #include "server_writeback.h"
23 #include "connection.h"
24 #include "hmdfs_client.h"
25 #include "socket_adapter.h"
26 #include "authority/authentication.h"
27
28 DEFINE_MUTEX(hmdfs_sysfs_mutex);
29 static struct kset *hmdfs_kset;
30
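/*
 * Work item used by ctrl_cmd_off_line_all_handler() below: each peer is
 * disconnected on the system workqueue, and the shared counter/waitqueue
 * let the handler wait until every scheduled disconnect has finished.
 */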
31 struct hmdfs_disconnect_node_work {
32 struct hmdfs_peer *conn;
33 struct work_struct work;
34 atomic_t *cnt;
35 struct wait_queue_head *waitq;
36 };
37
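/*
 * CMD_UPDATE_SOCKET: userspace hands over an established socket fd for the
 * peer identified by cmd.cid. When the connection is obtained and no
 * system_cred has been recorded yet, the writer's credentials are installed
 * as sbi->system_cred.
 */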
38 static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
39 struct hmdfs_sb_info *sbi)
40 {
41 struct update_socket_param cmd;
42 struct hmdfs_peer *node = NULL;
43 struct connection *conn = NULL;
44
45 if (unlikely(!buf || len != sizeof(cmd))) {
46 hmdfs_err("len/buf error");
47 goto out;
48 }
49 memcpy(&cmd, buf, sizeof(cmd));
50
51 node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
52 if (unlikely(!node)) {
53 hmdfs_err("failed to update ctrl node: cannot get peer");
54 goto out;
55 }
56
57 conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
58 if (unlikely(!conn)) {
59 hmdfs_err("failed to update ctrl node: cannot get conn");
60 } else if (!sbi->system_cred) {
61 const struct cred *system_cred = get_cred(current_cred());
62
63 if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
64 put_cred(system_cred);
65 else
66 hmdfs_check_cred(system_cred);
67 }
68
69 if (conn)
70 connection_put(conn);
71 out:
72 if (node)
73 peer_put(node);
74 }
75
76 static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
77 struct hmdfs_sb_info *sbi)
78 {
79 struct update_devsl_param cmd;
80 struct hmdfs_peer *node = NULL;
81
82 if (unlikely(!buf || len != sizeof(cmd))) {
83 hmdfs_err("Recved a invalid userbuf");
84 return;
85 }
86 memcpy(&cmd, buf, sizeof(cmd));
87
88 node = hmdfs_lookup_from_cid(sbi, cmd.cid);
89 if (unlikely(!node)) {
90 hmdfs_err("failed to update devsl: cannot get peer");
91 return;
92 }
93 hmdfs_info("Found peer: device_id = %llu", node->device_id);
94 node->devsl = cmd.devsl;
95 peer_put(node);
96 }
97
98 static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
99 {
100 hmdfs_start_process_offline(conn);
101 hmdfs_disconnect_node(conn);
102 hmdfs_stop_process_offline(conn);
103 }
104
105 static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
106 struct hmdfs_sb_info *sbi)
107 {
108 struct offline_param cmd;
109 struct hmdfs_peer *node = NULL;
110
111 if (unlikely(!buf || len != sizeof(cmd))) {
112 hmdfs_err("Recved a invalid userbuf");
113 return;
114 }
115 memcpy(&cmd, buf, sizeof(cmd));
116 node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
117 if (unlikely(!node)) {
118 hmdfs_err("Cannot find node by device");
119 return;
120 }
121 hmdfs_info("Found peer: device_id = %llu", node->device_id);
122 hmdfs_disconnect_node_marked(node);
123 peer_put(node);
124 }
125
126 static void hmdfs_disconnect_node_work_fn(struct work_struct *base)
127 {
128 struct hmdfs_disconnect_node_work *work =
129 container_of(base, struct hmdfs_disconnect_node_work, work);
130
131 hmdfs_disconnect_node_marked(work->conn);
132 if (atomic_dec_and_test(work->cnt))
133 wake_up(work->waitq);
134 kfree(work);
135 }
136
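/*
 * CMD_OFF_LINE_ALL: disconnect every known peer. Each disconnect is pushed
 * to the system workqueue when the work item can be allocated (falling back
 * to a synchronous disconnect otherwise), and the handler waits for all
 * scheduled work items before returning. node_lock is dropped around each
 * iteration step.
 */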
137 static void ctrl_cmd_off_line_all_handler(const char *buf, size_t len,
138 struct hmdfs_sb_info *sbi)
139 {
140 struct hmdfs_peer *node = NULL;
141 struct hmdfs_disconnect_node_work *work = NULL;
142 atomic_t cnt = ATOMIC_INIT(0);
143 wait_queue_head_t waitq;
144
145 if (unlikely(len != sizeof(struct offline_all_param))) {
146 hmdfs_err("Recved a invalid userbuf, len %zu, expect %zu\n",
147 len, sizeof(struct offline_all_param));
148 return;
149 }
150
151 init_waitqueue_head(&waitq);
152 mutex_lock(&sbi->connections.node_lock);
153 list_for_each_entry(node, &sbi->connections.node_list, list) {
154 mutex_unlock(&sbi->connections.node_lock);
155 work = kmalloc(sizeof(*work), GFP_KERNEL);
156 if (work) {
157 atomic_inc(&cnt);
158 work->conn = node;
159 work->cnt = &cnt;
160 work->waitq = &waitq;
161 INIT_WORK(&work->work, hmdfs_disconnect_node_work_fn);
162 schedule_work(&work->work);
163 } else {
164 hmdfs_disconnect_node_marked(node);
165 }
166 mutex_lock(&sbi->connections.node_lock);
167 }
168 mutex_unlock(&sbi->connections.node_lock);
169
170 wait_event(waitq, !atomic_read(&cnt));
171 }
172
173 typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
174 struct hmdfs_sb_info *sbi);
175
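/* Dispatch table indexed by the command id written to the 'cmd' attribute. */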
176 static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
177 [CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
178 [CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
179 [CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
180 [CMD_OFF_LINE_ALL] = ctrl_cmd_off_line_all_handler,
181 };
182
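/*
 * Reading 'cmd' pops one queued struct notify_param from the notify kfifo
 * (NOTIFY_NONE if the fifo is empty); writing it passes a binary command
 * blob whose leading int selects an entry in cmd_handler[].
 */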
183 static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
184 char *buf)
185 {
186 struct notify_param param;
187 int out_len;
188 struct hmdfs_sb_info *sbi = to_sbi(kobj);
189
190 memset(&param, 0, sizeof(param));
191 spin_lock(&sbi->notify_fifo_lock);
192 out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
193 spin_unlock(&sbi->notify_fifo_lock);
194 if (out_len != sizeof(param))
195 param.notify = NOTIFY_NONE;
196 memcpy(buf, &param, sizeof(param));
197 return sizeof(param);
198 }
199
200 static const char *cmd2str(int cmd)
201 {
202 switch (cmd) {
203 case CMD_UPDATE_SOCKET:
204 return "CMD_UPDATE_SOCKET";
205 case CMD_UPDATE_DEVSL:
206 return "CMD_UPDATE_DEVSL";
207 case CMD_OFF_LINE:
208 return "CMD_OFF_LINE";
209 case CMD_OFF_LINE_ALL:
210 return "CMD_OFF_LINE_ALL";
211 default:
212 return "illegal cmd";
213 }
214 }
215
216 static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
217 const char *buf, size_t len)
218 {
219 int cmd;
220 struct hmdfs_sb_info *sbi = to_sbi(kobj);
221
222 if (!sbi) {
223 hmdfs_info("Fatal! Empty sbi. Mount fs first");
224 return len;
225 }
226 if (len < sizeof(int)) {
227 hmdfs_err("Illegal cmd: cmd len = %zu", len);
228 return len;
229 }
230 cmd = *(int *)buf;
231 if (cmd < 0 || cmd >= CMD_CNT) {
232 hmdfs_err("Illegal cmd : cmd = %d", cmd);
233 return len;
234 }
235 mutex_lock(&sbi->cmd_handler_mutex);
236 hmdfs_info("Recved cmd: %s", cmd2str(cmd));
237 if (cmd_handler[cmd])
238 cmd_handler[cmd](buf, len, sbi);
239 mutex_unlock(&sbi->cmd_handler_mutex);
240 return len;
241 }
242
243 static struct sbi_attribute sbi_cmd_attr =
244 __ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);
245
246 static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
247 char *buf)
248 {
249 ssize_t size = 0;
250 struct hmdfs_sb_info *sbi = NULL;
251 struct hmdfs_peer *peer = NULL;
252 struct connection *conn_impl = NULL;
253 struct tcp_handle *tcp = NULL;
254
255 sbi = to_sbi(kobj);
256 size += sprintf(buf + size, "peers version status\n");
257
258 mutex_lock(&sbi->connections.node_lock);
259 list_for_each_entry(peer, &sbi->connections.node_list, list) {
260 size += sprintf(buf + size, "%llu %d %d\n", peer->device_id,
261 peer->version, peer->status);
262 // connection information
263 size += sprintf(
264 buf + size,
265 "\t socket_fd connection_status tcp_status ... refcnt\n");
266 mutex_lock(&peer->conn_impl_list_lock);
267 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
268 tcp = conn_impl->connect_handle;
269 size += sprintf(buf + size, "\t %d \t%d \t%d \t%p \t%ld\n",
270 tcp->fd, conn_impl->status,
271 tcp->sock->state, tcp->sock, file_count(tcp->sock->file));
272 }
273 mutex_unlock(&peer->conn_impl_list_lock);
274 }
275 mutex_unlock(&sbi->connections.node_lock);
276 return size;
277 }
278
279 static ssize_t sbi_status_store(struct kobject *kobj,
280 struct sbi_attribute *attr, const char *buf,
281 size_t len)
282 {
283 return len;
284 }
285
286 static struct sbi_attribute sbi_status_attr =
287 __ATTR(status, 0664, sbi_status_show, sbi_status_store);
288
289 static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
290 char *buf)
291 {
292 ssize_t size = 0;
293 struct hmdfs_sb_info *sbi = NULL;
294 struct hmdfs_peer *peer = NULL;
295 struct connection *conn_impl = NULL;
296 struct tcp_handle *tcp = NULL;
297
298 sbi = to_sbi(kobj);
299 mutex_lock(&sbi->connections.node_lock);
300 list_for_each_entry(peer, &sbi->connections.node_list, list) {
301 // connection information
302 mutex_lock(&peer->conn_impl_list_lock);
303 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
304 tcp = conn_impl->connect_handle;
305 size += sprintf(buf + size, "socket_fd: %d\n", tcp->fd);
306 size += sprintf(buf + size,
307 "\tsend_msg %d \tsend_bytes %llu\n",
308 conn_impl->stat.send_message_count,
309 conn_impl->stat.send_bytes);
310 size += sprintf(buf + size,
311 "\trecv_msg %d \trecv_bytes %llu\n",
312 conn_impl->stat.recv_message_count,
313 conn_impl->stat.recv_bytes);
314 }
315 mutex_unlock(&peer->conn_impl_list_lock);
316 }
317 mutex_unlock(&sbi->connections.node_lock);
318 return size;
319 }
320
321 static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
322 const char *buf, size_t len)
323 {
324 struct hmdfs_sb_info *sbi = NULL;
325 struct hmdfs_peer *peer = NULL;
326 struct connection *conn_impl = NULL;
327
328 sbi = to_sbi(kobj);
329 mutex_lock(&sbi->connections.node_lock);
330 list_for_each_entry(peer, &sbi->connections.node_list, list) {
331 // connection information
332 mutex_lock(&peer->conn_impl_list_lock);
333 list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
334 conn_impl->stat.send_message_count = 0;
335 conn_impl->stat.send_bytes = 0;
336 conn_impl->stat.recv_message_count = 0;
337 conn_impl->stat.recv_bytes = 0;
338 }
339 mutex_unlock(&peer->conn_impl_list_lock);
340 }
341 mutex_unlock(&sbi->connections.node_lock);
342 return len;
343 }
344
345 static struct sbi_attribute sbi_statistic_attr =
346 __ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);
347
348 static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
349 struct sbi_attribute *attr, char *buf)
350 {
351 return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
352 }
353
354 #define PRECISION_MAX 3600000
355
356 static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
357 struct sbi_attribute *attr,
358 const char *buf, size_t len)
359 {
360 int ret;
361 unsigned int precision;
362 struct hmdfs_sb_info *sbi = to_sbi(kobj);
363
364 ret = kstrtouint(skip_spaces(buf), 0, &precision);
365 if (!ret) {
366 if (precision <= PRECISION_MAX)
367 sbi->dcache_precision = precision;
368 else
369 ret = -EINVAL;
370 }
371
372 return ret ? ret : len;
373 }
374
375 static struct sbi_attribute sbi_dcache_precision_attr =
376 __ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
377 sbi_dcache_precision_store);
378
379 static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
380 struct sbi_attribute *attr, char *buf)
381 {
382 return snprintf(buf, PAGE_SIZE, "%lu\n",
383 to_sbi(kobj)->dcache_threshold);
384 }
385
386 static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
387 struct sbi_attribute *attr,
388 const char *buf, size_t len)
389 {
390 int ret;
391 unsigned long threshold;
392 struct hmdfs_sb_info *sbi = to_sbi(kobj);
393
394 ret = kstrtoul(skip_spaces(buf), 0, &threshold);
395 if (!ret)
396 sbi->dcache_threshold = threshold;
397
398 return ret ? ret : len;
399 }
400
401 static struct sbi_attribute sbi_dcache_threshold_attr =
402 __ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
403 sbi_dcache_threshold_store);
404
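/*
 * The *_statistic_show() helpers below emit one line per request opcode and
 * stop early (appending a final newline) once the sysfs page is full.
 */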
405 static ssize_t server_statistic_show(struct kobject *kobj,
406 struct sbi_attribute *attr, char *buf)
407 {
408 int i, ret;
409 const size_t size = PAGE_SIZE - 1;
410 ssize_t pos = 0;
411 struct server_statistic *stat = to_sbi(kobj)->s_server_statis;
412
413 for (i = 0; i < F_SIZE; i++) {
414
415 ret = snprintf(buf + pos, size - pos,
416 "%llu %u %llu %llu\n",
417 stat[i].cnt,
418 jiffies_to_msecs(stat[i].max),
419 stat[i].snd_cnt, stat[i].snd_fail_cnt);
420 if (ret > size - pos)
421 break;
422 pos += ret;
423 }
424
425 /* If we broke out of the loop early, terminate the output with a newline */
426 if (i < F_SIZE) {
427 ret = snprintf(buf + pos, size + 1 - pos, "\n");
428 pos += ret;
429 }
430 return pos;
431 }
432
433 static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);
434
435 static ssize_t client_statistic_show(struct kobject *kobj,
436 struct sbi_attribute *attr, char *buf)
437 {
438 int i, ret;
439 const size_t size = PAGE_SIZE - 1;
440 ssize_t pos = 0;
441 struct client_statistic *stat = to_sbi(kobj)->s_client_statis;
442
443 for (i = 0; i < F_SIZE; i++) {
444
445 ret = snprintf(buf + pos, size - pos,
446 "%llu %llu %llu %llu %llu %u\n",
447 stat[i].snd_cnt,
448 stat[i].snd_fail_cnt,
449 stat[i].resp_cnt,
450 stat[i].timeout_cnt,
451 stat[i].delay_resp_cnt,
452 jiffies_to_msecs(stat[i].max));
453 if (ret > size - pos)
454 break;
455 pos += ret;
456 }
457
458 /* If we broke out of the loop early, terminate the output with a newline */
459 if (i < F_SIZE) {
460 ret = snprintf(buf + pos, size + 1 - pos, "\n");
461 pos += ret;
462 }
463
464 return pos;
465 }
466
467 static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);
468
469 static inline unsigned long pages_to_kbytes(unsigned long page)
470 {
471 return page << (PAGE_SHIFT - 10);
472 }
473
474 static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
475 struct sbi_attribute *attr,
476 char *buf)
477 {
478 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
479 struct hmdfs_writeback *hwb = sbi->h_wb;
480 unsigned long avg;
481 unsigned long max;
482 unsigned long min;
483
484 spin_lock(&hwb->write_bandwidth_lock);
485 avg = hwb->avg_write_bandwidth;
486 max = hwb->max_write_bandwidth;
487 min = hwb->min_write_bandwidth;
488 spin_unlock(&hwb->write_bandwidth_lock);
489
490 if (min == ULONG_MAX)
491 min = 0;
492
493 return snprintf(buf, PAGE_SIZE,
494 "%10lu\n"
495 "%10lu\n"
496 "%10lu\n",
497 pages_to_kbytes(avg),
498 pages_to_kbytes(max),
499 pages_to_kbytes(min));
500 }
501
502 static struct sbi_attribute sbi_dirty_writeback_stats_attr =
503 __ATTR_RO(dirty_writeback_stats);
504
505 static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
506 struct sbi_attribute *attr,
507 char *buf)
508 {
509 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
510
511 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->wb_timeout_ms);
512 }
513
514 static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj,
515 struct sbi_attribute *attr,
516 const char *buf, size_t len)
517 {
518 struct hmdfs_sb_info *sbi = to_sbi(kobj);
519 unsigned int val;
520 int err;
521
522 err = kstrtouint(buf, 10, &val);
523 if (err)
524 return err;
525
526 if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS)
527 return -EINVAL;
528
529 sbi->wb_timeout_ms = val;
530
531 return len;
532 }
533
534 static struct sbi_attribute sbi_wb_timeout_ms_attr =
535 __ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show,
536 sbi_wb_timeout_ms_store);
537
538 static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj,
539 struct sbi_attribute *attr,
540 char *buf)
541 {
542 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
543
544 return snprintf(buf, PAGE_SIZE, "%u\n",
545 sbi->h_wb->dirty_writeback_interval);
546 }
547
548 static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj,
549 struct sbi_attribute *attr,
550 const char *buf, size_t len)
551 {
552 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
553 int err;
554
555 err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval);
556 if (err)
557 return err;
558 return len;
559 }
560
561 static struct sbi_attribute sbi_dirty_writeback_centisecs_attr =
562 __ATTR(dirty_writeback_centisecs, 0664,
563 sbi_dirty_writeback_centisecs_show,
564 sbi_dirty_writeback_centisecs_store);
565
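/*
 * The dirty_* thresholds below are kept mutually consistent: each store
 * clamps its neighbours so that the background limits never exceed the
 * corresponding hard limits and the per-file limits never exceed the
 * fs-wide ones, then recomputes the thresholds and the ratelimit.
 */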
566 static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj,
567 struct sbi_attribute *attr,
568 char *buf)
569 {
570 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
571
572 return snprintf(buf, PAGE_SIZE, "%lu\n",
573 sbi->h_wb->dirty_file_bg_bytes);
574 }
575
576 static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj,
577 struct sbi_attribute *attr,
578 const char *buf,
579 size_t len)
580 {
581 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
582 unsigned long file_background_bytes = 0;
583 int err;
584
585 err = kstrtoul(buf, 10, &file_background_bytes);
586 if (err)
587 return err;
588 if (file_background_bytes == 0)
589 return -EINVAL;
590
591 sbi->h_wb->dirty_fs_bytes =
592 max(sbi->h_wb->dirty_fs_bytes, file_background_bytes);
593 sbi->h_wb->dirty_fs_bg_bytes =
594 max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes);
595 sbi->h_wb->dirty_file_bytes =
596 max(sbi->h_wb->dirty_file_bytes, file_background_bytes);
597
598 sbi->h_wb->dirty_file_bg_bytes = file_background_bytes;
599 hmdfs_calculate_dirty_thresh(sbi->h_wb);
600 hmdfs_update_ratelimit(sbi->h_wb);
601 return len;
602 }
603
604 static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj,
605 struct sbi_attribute *attr,
606 char *buf)
607 {
608 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
609
610 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes);
611 }
612
613 static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj,
614 struct sbi_attribute *attr,
615 const char *buf, size_t len)
616 {
617 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
618 unsigned long fs_background_bytes = 0;
619 int err;
620
621 err = kstrtoul(buf, 10, &fs_background_bytes);
622 if (err)
623 return err;
624 if (fs_background_bytes == 0)
625 return -EINVAL;
626
627 sbi->h_wb->dirty_file_bg_bytes =
628 min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes);
629 sbi->h_wb->dirty_fs_bytes =
630 max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes);
631
632 sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes;
633 hmdfs_calculate_dirty_thresh(sbi->h_wb);
634 hmdfs_update_ratelimit(sbi->h_wb);
635 return len;
636 }
637
638 static struct sbi_attribute sbi_dirty_file_background_bytes_attr =
639 __ATTR(dirty_file_background_bytes, 0644,
640 sbi_dirty_file_background_bytes_show,
641 sbi_dirty_file_background_bytes_store);
642 static struct sbi_attribute sbi_dirty_fs_background_bytes_attr =
643 __ATTR(dirty_fs_background_bytes, 0644,
644 sbi_dirty_fs_background_bytes_show,
645 sbi_dirty_fs_background_bytes_store);
646
647 static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj,
648 struct sbi_attribute *attr, char *buf)
649 {
650 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
651
652 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes);
653 }
654
655 static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj,
656 struct sbi_attribute *attr,
657 const char *buf, size_t len)
658 {
659 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
660 unsigned long file_bytes = 0;
661 int err;
662
663 err = kstrtoul(buf, 10, &file_bytes);
664 if (err)
665 return err;
666 if (file_bytes == 0)
667 return -EINVAL;
668
669 sbi->h_wb->dirty_file_bg_bytes =
670 min(sbi->h_wb->dirty_file_bg_bytes, file_bytes);
671 sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes);
672
673 sbi->h_wb->dirty_file_bytes = file_bytes;
674 hmdfs_calculate_dirty_thresh(sbi->h_wb);
675 hmdfs_update_ratelimit(sbi->h_wb);
676 return len;
677 }
678
679 static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj,
680 struct sbi_attribute *attr, char *buf)
681 {
682 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
683
684 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes);
685 }
686
687 static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj,
688 struct sbi_attribute *attr,
689 const char *buf, size_t len)
690 {
691 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
692 unsigned long fs_bytes = 0;
693 int err;
694
695 err = kstrtoul(buf, 10, &fs_bytes);
696 if (err)
697 return err;
698 if (fs_bytes == 0)
699 return -EINVAL;
700
701 sbi->h_wb->dirty_file_bg_bytes =
702 min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes);
703 sbi->h_wb->dirty_file_bytes =
704 min(sbi->h_wb->dirty_file_bytes, fs_bytes);
705 sbi->h_wb->dirty_fs_bg_bytes =
706 min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes);
707
708 sbi->h_wb->dirty_fs_bytes = fs_bytes;
709 hmdfs_calculate_dirty_thresh(sbi->h_wb);
710 hmdfs_update_ratelimit(sbi->h_wb);
711 return len;
712 }
713
714 static struct sbi_attribute sbi_dirty_file_bytes_attr =
715 __ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show,
716 sbi_dirty_file_bytes_store);
717 static struct sbi_attribute sbi_dirty_fs_bytes_attr =
718 __ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show,
719 sbi_dirty_fs_bytes_store);
720
721 static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj,
722 struct sbi_attribute *attr,
723 char *buf)
724 {
725 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
726
727 return snprintf(buf, PAGE_SIZE, "%u\n",
728 sbi->h_wb->writeback_timelimit / HZ);
729 }
730
731 static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj,
732 struct sbi_attribute *attr,
733 const char *buf,
734 size_t len)
735 {
736 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
737 unsigned int time_limit = 0;
738 int err;
739
740 err = kstrtouint(buf, 10, &time_limit);
741 if (err)
742 return err;
743 if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ))
744 return -EINVAL;
745
746 sbi->h_wb->writeback_timelimit = time_limit * HZ;
747 return len;
748 }
749
750 static struct sbi_attribute sbi_dirty_writeback_timelimit_attr =
751 __ATTR(dirty_writeback_timelimit, 0644, sbi_dirty_writeback_timelimit_show,
752 sbi_dirty_writeback_timelimit_store);
753
754 static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj,
755 struct sbi_attribute *attr,
756 char *buf)
757 {
758 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
759
760 return snprintf(buf, PAGE_SIZE, "%lu\n",
761 sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT);
762 }
763
764 static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj,
765 struct sbi_attribute *attr,
766 const char *buf,
767 size_t len)
768 {
769 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
770 unsigned long bw_thresh_lowerbytes = 0;
771 unsigned long bw_thresh_lowerlimit;
772 int err;
773
774 err = kstrtoul(buf, 10, &bw_thresh_lowerbytes);
775 if (err)
776 return err;
777
778 bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE);
779 if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT ||
780 bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT)
781 return -EINVAL;
782
783 sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit;
784 return len;
785 }
786
787 static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr =
788 __ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show,
789 sbi_dirty_thresh_lowerlimit_store);
790
791 static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj,
792 struct sbi_attribute *attr,
793 char *buf)
794 {
795 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
796
797 return snprintf(buf, PAGE_SIZE, "%d\n",
798 sbi->h_wb->dirty_auto_threshold);
799 }
800
801 static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj,
802 struct sbi_attribute *attr,
803 const char *buf,
804 size_t len)
805 {
806 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
807 bool dirty_auto_threshold = false;
808 int err;
809
810 err = kstrtobool(buf, &dirty_auto_threshold);
811 if (err)
812 return err;
813
814 sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold;
815 return len;
816 }
817
818 static struct sbi_attribute sbi_dirty_writeback_autothresh_attr =
819 __ATTR(dirty_writeback_autothresh, 0644, sbi_dirty_writeback_autothresh_show,
820 sbi_dirty_writeback_autothresh_store);
821
822 static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj,
823 struct sbi_attribute *attr,
824 char *buf)
825 {
826 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
827
828 return snprintf(buf, PAGE_SIZE, "%d\n",
829 sbi->h_wb->dirty_writeback_control);
830 }
831
832 static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj,
833 struct sbi_attribute *attr,
834 const char *buf, size_t len)
835 {
836 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
837 unsigned int dirty_writeback_control = 0;
838 int err;
839
840 err = kstrtouint(buf, 10, &dirty_writeback_control);
841 if (err)
842 return err;
843
844 sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control;
845 return len;
846 }
847
848 static struct sbi_attribute sbi_dirty_writeback_control_attr =
849 __ATTR(dirty_writeback_control, 0644, sbi_dirty_writeback_control_show,
850 sbi_dirty_writeback_control_store);
851
852 static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj,
853 struct sbi_attribute *attr,
854 char *buf)
855 {
856 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
857
858 return snprintf(buf, PAGE_SIZE, "%d\n",
859 sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT);
860 }
861
862 static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj,
863 struct sbi_attribute *attr,
864 const char *buf,
865 size_t len)
866 {
867 struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
868 int dirty_thresh_mb;
869 unsigned long long pages;
870 int err;
871
872 err = kstrtoint(buf, 10, &dirty_thresh_mb);
873 if (err)
874 return err;
875
876 if (dirty_thresh_mb <= 0)
877 return -EINVAL;
878
879 pages = dirty_thresh_mb;
880 pages <<= HMDFS_MB_TO_PAGE_SHIFT;
881 if (pages > INT_MAX) {
882 hmdfs_err("Illegal dirty_thresh_mb %d, its page count beyonds max int",
883 dirty_thresh_mb);
884 return -EINVAL;
885 }
886
887 hswb->dirty_thresh_pg = (unsigned int)pages;
888 return len;
889 }
890
891 static struct sbi_attribute sbi_srv_dirty_thresh_attr =
892 __ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show,
893 sbi_srv_dirty_thresh_store);
894
895
896 static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj,
897 struct sbi_attribute *attr,
898 char *buf)
899 {
900 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
901
902 return snprintf(buf, PAGE_SIZE, "%d\n",
903 sbi->h_swb->dirty_writeback_control);
904 }
905
906 static ssize_t sbi_srv_dirty_wb_control_store(struct kobject *kobj,
907 struct sbi_attribute *attr,
908 const char *buf,
909 size_t len)
910 {
911 struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
912 bool dirty_writeback_control = true;
913 int err;
914
915 err = kstrtobool(buf, &dirty_writeback_control);
916 if (err)
917 return err;
918
919 hswb->dirty_writeback_control = dirty_writeback_control;
920
921 return len;
922 }
923
924 static struct sbi_attribute sbi_srv_dirty_wb_control_attr =
925 __ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show,
926 sbi_srv_dirty_wb_control_store);
927
928 static ssize_t sbi_dcache_timeout_show(struct kobject *kobj,
929 struct sbi_attribute *attr, char *buf)
930 {
931 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
932
933 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout);
934 }
935
936 static ssize_t sbi_dcache_timeout_store(struct kobject *kobj,
937 struct sbi_attribute *attr,
938 const char *buf, size_t len)
939 {
940 struct hmdfs_sb_info *sbi = to_sbi(kobj);
941 unsigned int timeout;
942 int err;
943
944 err = kstrtouint(buf, 0, &timeout);
945 if (err)
946 return err;
947
948 /* zero is invalid, and it doesn't mean no cache */
949 if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT)
950 return -EINVAL;
951
952 sbi->dcache_timeout = timeout;
953
954 return len;
955 }
956
957 static struct sbi_attribute sbi_dcache_timeout_attr =
958 __ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show,
959 sbi_dcache_timeout_store);
960
961 static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj,
962 struct sbi_attribute *attr, char *buf)
963 {
964 return snprintf(buf, PAGE_SIZE, "%u\n",
965 to_sbi(kobj)->write_cache_timeout);
966 }
967
968 static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj,
969 struct sbi_attribute *attr, const char *buf, size_t len)
970 {
971 int ret;
972 unsigned int timeout;
973 struct hmdfs_sb_info *sbi = to_sbi(kobj);
974
975 ret = kstrtouint(buf, 0, &timeout);
976 if (ret)
977 return ret;
978
979 /* setting write_cache_timeout to 0 disables this functionality */
980 sbi->write_cache_timeout = timeout;
981
982 return len;
983 }
984
985 static struct sbi_attribute sbi_write_cache_timeout_sec_attr =
986 __ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show,
987 sbi_write_cache_timeout_sec_store);
988
989 static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj,
990 struct sbi_attribute *attr,
991 char *buf)
992 {
993 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
994
995 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay);
996 }
997
998 static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj,
999 struct sbi_attribute *attr,
1000 const char *buf,
1001 size_t len)
1002 {
1003 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1004 unsigned int delay = 0;
1005 int err;
1006
1007 err = kstrtouint(buf, 10, &delay);
1008 if (err)
1009 return err;
1010
1011 sbi->async_cb_delay = delay;
1012
1013 return len;
1014 }
1015
1016 static struct sbi_attribute sbi_node_evt_cb_delay_attr =
1017 __ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show,
1018 sbi_node_evt_cb_delay_store);
1019
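/*
 * Walk an idr and count live entries, rescheduling every
 * HMDFS_IDR_RESCHED_COUNT entries so that a large idr does not hog the CPU
 * while a sysfs read is in progress.
 */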
1020 static int calc_idr_number(struct idr *idr)
1021 {
1022 void *entry = NULL;
1023 int id;
1024 int number = 0;
1025
1026 idr_for_each_entry(idr, entry, id) {
1027 number++;
1028 if (number % HMDFS_IDR_RESCHED_COUNT == 0)
1029 cond_resched();
1030 }
1031
1032 return number;
1033 }
1034
1035 static ssize_t sbi_show_idr_stats(struct kobject *kobj,
1036 struct sbi_attribute *attr,
1037 char *buf, bool showmsg)
1038 {
1039 ssize_t size = 0;
1040 int count;
1041 struct hmdfs_sb_info *sbi = NULL;
1042 struct hmdfs_peer *peer = NULL;
1043 struct idr *idr = NULL;
1044
1045 sbi = to_sbi(kobj);
1046
1047 mutex_lock(&sbi->connections.node_lock);
1048 list_for_each_entry(peer, &sbi->connections.node_list, list) {
1049 idr = showmsg ? &peer->msg_idr : &peer->file_id_idr;
1050 count = calc_idr_number(idr);
1051 size += snprintf(buf + size, PAGE_SIZE - size,
1052 "device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n",
1053 peer->device_id, count, idr_get_cursor(idr));
1054 if (size >= PAGE_SIZE) {
1055 size = PAGE_SIZE;
1056 break;
1057 }
1058 }
1059 mutex_unlock(&sbi->connections.node_lock);
1060
1061 return size;
1062 }
1063
1064 static ssize_t pending_message_show(struct kobject *kobj,
1065 struct sbi_attribute *attr,
1066 char *buf)
1067 {
1068 return sbi_show_idr_stats(kobj, attr, buf, true);
1069 }
1070
1071 static struct sbi_attribute sbi_pending_message_attr =
1072 __ATTR_RO(pending_message);
1073
1074 static ssize_t peer_opened_fd_show(struct kobject *kobj,
1075 struct sbi_attribute *attr, char *buf)
1076 {
1077 return sbi_show_idr_stats(kobj, attr, buf, false);
1078 }
1079
1080 static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd);
1081
1082 static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj,
1083 struct sbi_attribute *attr,
1084 char *buf)
1085 {
1086 const struct hmdfs_sb_info *sbi = to_sbi(kobj);
1087
1088 return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active);
1089 }
1090
1091 static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj,
1092 struct sbi_attribute *attr, const char *buf, size_t len)
1093 {
1094 int ret;
1095 unsigned int max_active;
1096 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1097
1098 ret = kstrtouint(buf, 0, &max_active);
1099 if (ret)
1100 return ret;
1101
1102 sbi->async_req_max_active = max_active;
1103
1104 return len;
1105 }
1106
1107 static struct sbi_attribute sbi_srv_req_max_active_attr =
1108 __ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show,
1109 sbi_srv_req_max_active_attr_store);
1110
1111
1112 static ssize_t cache_file_show(struct hmdfs_sb_info *sbi,
1113 struct list_head *head, char *buf)
1114 {
1115 struct cache_file_node *cfn = NULL;
1116 ssize_t pos = 0;
1117
1118 mutex_lock(&sbi->cache_list_lock);
1119 list_for_each_entry(cfn, head, list) {
1120 pos += snprintf(buf + pos, PAGE_SIZE - pos,
1121 "dev_id: %s relative_path: %s\n",
1122 cfn->cid, cfn->relative_path);
1123 if (pos >= PAGE_SIZE) {
1124 pos = PAGE_SIZE;
1125 break;
1126 }
1127 }
1128 mutex_unlock(&sbi->cache_list_lock);
1129
1130 return pos;
1131 }
1132
1133 static ssize_t client_cache_file_show(struct kobject *kobj,
1134 struct sbi_attribute *attr, char *buf)
1135 {
1136 return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf);
1137 }
1138 static ssize_t server_cache_file_show(struct kobject *kobj,
1139 struct sbi_attribute *attr, char *buf)
1140 {
1141 return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf);
1142 }
1143
1144 static struct sbi_attribute sbi_server_cache_file_attr =
1145 __ATTR_RO(server_cache_file);
1146 static struct sbi_attribute sbi_client_cache_file_attr =
1147 __ATTR_RO(client_cache_file);
1148
1149 static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr,
1150 char *buf)
1151 {
1152 return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->seq);
1153 }
1154
1155 static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq);
1156
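/*
 * Sum the stash/restore/rebuild statistics over all peers. node_lock is
 * dropped while each peer's counters are read; peer_get()/peer_put() keep
 * the current node alive across that window.
 */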
1157 static ssize_t peers_sum_attr_show(struct kobject *kobj,
1158 struct sbi_attribute *attr, char *buf)
1159 {
1160 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1161 struct hmdfs_peer *node = NULL;
1162 unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0,
1163 restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0, rebuild_invalid = 0,
1164 rebuild_time = 0;
1165 unsigned long long stash_ok_pages = 0, stash_fail_pages = 0,
1166 restore_ok_pages = 0, restore_fail_pages = 0;
1167
1168 mutex_lock(&sbi->connections.node_lock);
1169 list_for_each_entry(node, &sbi->connections.node_list, list) {
1170 peer_get(node);
1171 mutex_unlock(&sbi->connections.node_lock);
1172 stash_ok += node->stats.stash.total_ok;
1173 stash_fail += node->stats.stash.total_fail;
1174 stash_ok_pages += node->stats.stash.ok_pages;
1175 stash_fail_pages += node->stats.stash.fail_pages;
1176 restore_ok += node->stats.restore.total_ok;
1177 restore_fail += node->stats.restore.total_fail;
1178 restore_ok_pages += node->stats.restore.ok_pages;
1179 restore_fail_pages += node->stats.restore.fail_pages;
1180 rebuild_ok += node->stats.rebuild.total_ok;
1181 rebuild_fail += node->stats.rebuild.total_fail;
1182 rebuild_invalid += node->stats.rebuild.total_invalid;
1183 rebuild_time += node->stats.rebuild.time;
1184 peer_put(node);
1185 mutex_lock(&sbi->connections.node_lock);
1186 }
1187 mutex_unlock(&sbi->connections.node_lock);
1188
1189 return snprintf(buf, PAGE_SIZE,
1190 "%u %u %llu %llu\n"
1191 "%u %u %llu %llu\n"
1192 "%u %u %u %u\n",
1193 stash_ok, stash_fail, stash_ok_pages, stash_fail_pages,
1194 restore_ok, restore_fail, restore_ok_pages,
1195 restore_fail_pages, rebuild_ok, rebuild_fail,
1196 rebuild_invalid, rebuild_time);
1197 }
1198
1199 static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr);
1200
1201 const char * const flag_name[] = {
1202 "READPAGES",
1203 "READPAGES_OPEN",
1204 "ATOMIC_OPEN",
1205 };
1206
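/*
 * Render a feature bitmask as '|'-separated names, falling back to the raw
 * bit index for bits without an entry in flag_name[]. Output is clamped to
 * PAGE_SIZE as required for sysfs show callbacks.
 */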
1207 static ssize_t fill_features(char *buf, unsigned long long flag)
1208 {
1209 int i;
1210 ssize_t pos = 0;
1211 bool sep = false;
1212 int flag_name_count = ARRAY_SIZE(flag_name);
1213
1214 for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) {
1215 if (!(flag & BIT(i)))
1216 continue;
1217
1218 if (sep)
1219 pos += snprintf(buf + pos, PAGE_SIZE - pos, "|");
1220 sep = true;
1221
1222 if (pos >= PAGE_SIZE) {
1223 pos = PAGE_SIZE;
1224 break;
1225 }
1226
1227 if (i < flag_name_count && flag_name[i])
1228 pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s",
1229 flag_name[i]);
1230 else
1231 pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i);
1232
1233 if (pos >= PAGE_SIZE) {
1234 pos = PAGE_SIZE;
1235 break;
1236 }
1237 }
1238 pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1239 if (pos >= PAGE_SIZE)
1240 pos = PAGE_SIZE;
1241
1242 return pos;
1243 }
1244
1245 static ssize_t sbi_features_show(struct kobject *kobj,
1246 struct sbi_attribute *attr, char *buf)
1247 {
1248 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1249
1250 return fill_features(buf, sbi->s_features);
1251 }
1252
1253 static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444,
1254 sbi_features_show, NULL);
1255
1256 static struct attribute *sbi_attrs[] = {
1257 &sbi_cmd_attr.attr,
1258 &sbi_status_attr.attr,
1259 &sbi_statistic_attr.attr,
1260 &sbi_dcache_precision_attr.attr,
1261 &sbi_dcache_threshold_attr.attr,
1262 &sbi_dcache_timeout_attr.attr,
1263 &sbi_write_cache_timeout_sec_attr.attr,
1264 &sbi_local_op_attr.attr,
1265 &sbi_delay_resp_attr.attr,
1266 &sbi_wb_timeout_ms_attr.attr,
1267 &sbi_dirty_writeback_centisecs_attr.attr,
1268 &sbi_dirty_file_background_bytes_attr.attr,
1269 &sbi_dirty_fs_background_bytes_attr.attr,
1270 &sbi_dirty_file_bytes_attr.attr,
1271 &sbi_dirty_fs_bytes_attr.attr,
1272 &sbi_dirty_writeback_autothresh_attr.attr,
1273 &sbi_dirty_writeback_timelimit_attr.attr,
1274 &sbi_dirty_thresh_lowerlimit_attr.attr,
1275 &sbi_dirty_writeback_control_attr.attr,
1276 &sbi_dirty_writeback_stats_attr.attr,
1277 &sbi_srv_dirty_thresh_attr.attr,
1278 &sbi_srv_dirty_wb_control_attr.attr,
1279 &sbi_node_evt_cb_delay_attr.attr,
1280 &sbi_srv_req_max_active_attr.attr,
1281 &sbi_pending_message_attr.attr,
1282 &sbi_peer_opened_fd_attr.attr,
1283 &sbi_server_cache_file_attr.attr,
1284 &sbi_client_cache_file_attr.attr,
1285 &sbi_seq_attr.attr,
1286 &sbi_peers_attr.attr,
1287 &sbi_features_attr.attr,
1288 NULL,
1289 };
1290
1291 static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr,
1292 char *buf)
1293 {
1294 struct sbi_attribute *sbi_attr = to_sbi_attr(attr);
1295
1296 if (!sbi_attr->show)
1297 return -EIO;
1298 return sbi_attr->show(kobj, sbi_attr, buf);
1299 }
1300
1301 static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr,
1302 const char *buf, size_t len)
1303 {
1304 struct sbi_attribute *sbi_attr = to_sbi_attr(attr);
1305
1306 if (!sbi_attr->store)
1307 return -EIO;
1308 return sbi_attr->store(kobj, sbi_attr, buf, len);
1309 }
1310
1311 static const struct sysfs_ops sbi_sysfs_ops = {
1312 .show = sbi_attr_show,
1313 .store = sbi_attr_store,
1314 };
1315
1316 static void sbi_release(struct kobject *kobj)
1317 {
1318 struct hmdfs_sb_info *sbi = to_sbi(kobj);
1319
1320 complete(&sbi->s_kobj_unregister);
1321 }
1322
1323 static struct kobj_type sbi_ktype = {
1324 .sysfs_ops = &sbi_sysfs_ops,
1325 .default_attrs = sbi_attrs,
1326 .release = sbi_release,
1327 };
1328
1329 static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x)
1330 {
1331 return container_of(x, struct sbi_cmd_attribute, attr);
1332 }
1333
1334 static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x)
1335 {
1336 return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj);
1337 }
1338
1339 static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr,
1340 char *buf)
1341 {
1342 int cmd = to_sbi_cmd_attr(attr)->command;
1343 struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);
1344
1345 if (cmd < 0 || cmd >= F_SIZE)
1346 return 0;
1347
1348 return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd));
1349 }
1350
1351 static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr,
1352 const char *buf, size_t len)
1353 {
1354 unsigned int value;
1355 int cmd = to_sbi_cmd_attr(attr)->command;
1356 int ret = kstrtouint(skip_spaces(buf), 0, &value);
1357 struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);
1358
1359 if (cmd < 0 || cmd >= F_SIZE)
1360 return -EINVAL;
1361
1362 if (!ret)
1363 set_cmd_timeout(sbi, cmd, value);
1364
1365 return ret ? ret : len;
1366 }
1367
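/*
 * Per-command timeout attributes: each file under the cmd_timeout/
 * subdirectory maps one request opcode to its timeout, read and written via
 * get_cmd_timeout()/set_cmd_timeout().
 */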
1368 #define HMDFS_CMD_ATTR(_name, _cmd) \
1369 static struct sbi_cmd_attribute hmdfs_attr_##_name = { \
1370 .attr = { .name = __stringify(_name), .mode = 0664 }, \
1371 .command = (_cmd), \
1372 }
1373
1374 HMDFS_CMD_ATTR(open, F_OPEN);
1375 HMDFS_CMD_ATTR(release, F_RELEASE);
1376 HMDFS_CMD_ATTR(readpage, F_READPAGE);
1377 HMDFS_CMD_ATTR(writepage, F_WRITEPAGE);
1378 HMDFS_CMD_ATTR(iterate, F_ITERATE);
1379 HMDFS_CMD_ATTR(rmdir, F_RMDIR);
1380 HMDFS_CMD_ATTR(unlink, F_UNLINK);
1381 HMDFS_CMD_ATTR(rename, F_RENAME);
1382 HMDFS_CMD_ATTR(setattr, F_SETATTR);
1383 HMDFS_CMD_ATTR(statfs, F_STATFS);
1384 HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH);
1385 HMDFS_CMD_ATTR(getattr, F_GETATTR);
1386 HMDFS_CMD_ATTR(fsync, F_FSYNC);
1387 HMDFS_CMD_ATTR(syncfs, F_SYNCFS);
1388 HMDFS_CMD_ATTR(getxattr, F_GETXATTR);
1389 HMDFS_CMD_ATTR(setxattr, F_SETXATTR);
1390 HMDFS_CMD_ATTR(listxattr, F_LISTXATTR);
1391
1392 #define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr)
1393
1394 static struct attribute *sbi_timeout_attrs[] = {
1395 ATTR_LIST(open), ATTR_LIST(release),
1396 ATTR_LIST(readpage), ATTR_LIST(writepage),
1397 ATTR_LIST(iterate), ATTR_LIST(rmdir),
1398 ATTR_LIST(unlink), ATTR_LIST(rename),
1399 ATTR_LIST(setattr),
1400 ATTR_LIST(statfs), ATTR_LIST(drop_push),
1401 ATTR_LIST(getattr), ATTR_LIST(fsync),
1402 ATTR_LIST(syncfs), ATTR_LIST(getxattr),
1403 ATTR_LIST(setxattr), ATTR_LIST(listxattr),
1404 NULL
1405 };
1406
1407 static const struct sysfs_ops sbi_cmd_sysfs_ops = {
1408 .show = cmd_timeout_show,
1409 .store = cmd_timeout_store,
1410 };
1411
1412 static void sbi_timeout_release(struct kobject *kobj)
1413 {
1414 struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info,
1415 s_cmd_timeout_kobj);
1416
1417 complete(&sbi->s_timeout_kobj_unregister);
1418 }
1419
1420 static struct kobj_type sbi_timeout_ktype = {
1421 .sysfs_ops = &sbi_cmd_sysfs_ops,
1422 .default_attrs = sbi_timeout_attrs,
1423 .release = sbi_timeout_release,
1424 };
1425
1426 void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi)
1427 {
1428 kobject_put(&sbi->s_cmd_timeout_kobj);
1429 wait_for_completion(&sbi->s_timeout_kobj_unregister);
1430 kobject_put(&sbi->kobj);
1431 wait_for_completion(&sbi->s_kobj_unregister);
1432 }
1433
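/*
 * Resulting layout (illustrative): /sys/fs/hmdfs/<name>/ holds the sbi
 * attributes defined above, with a 'cmd_timeout' subdirectory for the
 * per-command timeout knobs and one 'peer_<device_id>' directory per peer.
 */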
1434 int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi)
1435 {
1436 int ret;
1437 struct kobject *kobj = NULL;
1438
1439 mutex_lock(&hmdfs_sysfs_mutex);
1440 kobj = kset_find_obj(hmdfs_kset, name);
1441 if (kobj) {
1442 hmdfs_err("mount failed, already exist");
1443 kobject_put(kobj);
1444 mutex_unlock(&hmdfs_sysfs_mutex);
1445 return -EEXIST;
1446 }
1447
1448 sbi->kobj.kset = hmdfs_kset;
1449 init_completion(&sbi->s_kobj_unregister);
1450 ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype,
1451 &hmdfs_kset->kobj, "%s", name);
1452 sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000), KGIDT_INIT(1000));
1453 mutex_unlock(&hmdfs_sysfs_mutex);
1454
1455 if (ret) {
1456 kobject_put(&sbi->kobj);
1457 wait_for_completion(&sbi->s_kobj_unregister);
1458 return ret;
1459 }
1460
1461 init_completion(&sbi->s_timeout_kobj_unregister);
1462 ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype,
1463 &sbi->kobj, "cmd_timeout");
1464 if (ret) {
1465 hmdfs_release_sysfs(sbi);
1466 return ret;
1467 }
1468
1469 kobject_uevent(&sbi->kobj, KOBJ_ADD);
1470 return 0;
1471 }
1472
1473 void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi)
1474 {
1475 kobject_del(&sbi->s_cmd_timeout_kobj);
1476 kobject_del(&sbi->kobj);
1477 }
1478
1479 static inline int to_sysfs_fmt_evt(unsigned int evt)
1480 {
1481 return evt == RAW_NODE_EVT_NR ? -1 : evt;
1482 }
1483
1484 static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr,
1485 char *buf)
1486 {
1487 struct hmdfs_peer *peer = to_peer(kobj);
1488
1489 return fill_features(buf, peer->features);
1490 }
1491
1492 static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr,
1493 char *buf)
1494 {
1495 struct hmdfs_peer *peer = to_peer(kobj);
1496
1497 return snprintf(buf, PAGE_SIZE,
1498 "cur_async evt %d seq %u\n"
1499 "cur_sync evt %d seq %u\n"
1500 "pending evt %d seq %u\n"
1501 "merged evt %u\n"
1502 "dup_drop evt %u %u\n"
1503 "waiting evt %u %u\n"
1504 "seq_tbl %u %u %u %u\n"
1505 "seq_rd_idx %u\n"
1506 "seq_wr_idx %u\n",
1507 to_sysfs_fmt_evt(peer->cur_evt[0]),
1508 peer->cur_evt_seq[0],
1509 to_sysfs_fmt_evt(peer->cur_evt[1]),
1510 peer->cur_evt_seq[1],
1511 to_sysfs_fmt_evt(peer->pending_evt),
1512 peer->pending_evt_seq,
1513 peer->merged_evt,
1514 peer->dup_evt[RAW_NODE_EVT_OFF],
1515 peer->dup_evt[RAW_NODE_EVT_ON],
1516 peer->waiting_evt[RAW_NODE_EVT_OFF],
1517 peer->waiting_evt[RAW_NODE_EVT_ON],
1518 peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2],
1519 peer->seq_tbl[3],
1520 peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR,
1521 peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR);
1522 }
1523
1524 static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr,
1525 char *buf)
1526 {
1527 struct hmdfs_peer *peer = to_peer(kobj);
1528
1529 return snprintf(buf, PAGE_SIZE,
1530 "cur_ok %u\n"
1531 "cur_nothing %u\n"
1532 "cur_fail %u\n"
1533 "total_ok %u\n"
1534 "total_nothing %u\n"
1535 "total_fail %u\n"
1536 "ok_pages %llu\n"
1537 "fail_pages %llu\n",
1538 peer->stats.stash.cur_ok,
1539 peer->stats.stash.cur_nothing,
1540 peer->stats.stash.cur_fail,
1541 peer->stats.stash.total_ok,
1542 peer->stats.stash.total_nothing,
1543 peer->stats.stash.total_fail,
1544 peer->stats.stash.ok_pages,
1545 peer->stats.stash.fail_pages);
1546 }
1547
1548 static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr,
1549 char *buf)
1550 {
1551 struct hmdfs_peer *peer = to_peer(kobj);
1552
1553 return snprintf(buf, PAGE_SIZE,
1554 "cur_ok %u\n"
1555 "cur_fail %u\n"
1556 "cur_keep %u\n"
1557 "total_ok %u\n"
1558 "total_fail %u\n"
1559 "total_keep %u\n"
1560 "ok_pages %llu\n"
1561 "fail_pages %llu\n",
1562 peer->stats.restore.cur_ok,
1563 peer->stats.restore.cur_fail,
1564 peer->stats.restore.cur_keep,
1565 peer->stats.restore.total_ok,
1566 peer->stats.restore.total_fail,
1567 peer->stats.restore.total_keep,
1568 peer->stats.restore.ok_pages,
1569 peer->stats.restore.fail_pages);
1570 }
1571
1572 static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr,
1573 char *buf)
1574 {
1575 struct hmdfs_peer *peer = to_peer(kobj);
1576
1577 return snprintf(buf, PAGE_SIZE,
1578 "cur_ok %u\n"
1579 "cur_fail %u\n"
1580 "cur_invalid %u\n"
1581 "total_ok %u\n"
1582 "total_fail %u\n"
1583 "total_invalid %u\n"
1584 "time %u\n",
1585 peer->stats.rebuild.cur_ok,
1586 peer->stats.rebuild.cur_fail,
1587 peer->stats.rebuild.cur_invalid,
1588 peer->stats.rebuild.total_ok,
1589 peer->stats.rebuild.total_fail,
1590 peer->stats.rebuild.total_invalid,
1591 peer->stats.rebuild.time);
1592 }
1593
1594 static struct peer_attribute peer_features_attr = __ATTR_RO(features);
1595 static struct peer_attribute peer_event_attr = __ATTR_RO(event);
1596 static struct peer_attribute peer_stash_attr = __ATTR_RO(stash);
1597 static struct peer_attribute peer_restore_attr = __ATTR_RO(restore);
1598 static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild);
1599
1600 static struct attribute *peer_attrs[] = {
1601 &peer_features_attr.attr,
1602 &peer_event_attr.attr,
1603 &peer_stash_attr.attr,
1604 &peer_restore_attr.attr,
1605 &peer_rebuild_attr.attr,
1606 NULL,
1607 };
1608
1609 static ssize_t peer_attr_show(struct kobject *kobj, struct attribute *attr,
1610 char *buf)
1611 {
1612 struct peer_attribute *peer_attr = to_peer_attr(attr);
1613
1614 if (!peer_attr->show)
1615 return -EIO;
1616 return peer_attr->show(kobj, peer_attr, buf);
1617 }
1618
1619 static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr,
1620 const char *buf, size_t len)
1621 {
1622 struct peer_attribute *peer_attr = to_peer_attr(attr);
1623
1624 if (!peer_attr->store)
1625 return -EIO;
1626 return peer_attr->store(kobj, peer_attr, buf, len);
1627 }
1628
1629 static const struct sysfs_ops peer_sysfs_ops = {
1630 .show = peer_attr_show,
1631 .store = peer_attr_store,
1632 };
1633
1634 static void peer_sysfs_release(struct kobject *kobj)
1635 {
1636 struct hmdfs_peer *peer = to_peer(kobj);
1637
1638 complete(&peer->kobj_unregister);
1639 }
1640
1641 static struct kobj_type peer_ktype = {
1642 .sysfs_ops = &peer_sysfs_ops,
1643 .default_attrs = peer_attrs,
1644 .release = peer_sysfs_release,
1645 };
1646
1647 int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi,
1648 struct hmdfs_peer *peer)
1649 {
1650 int err = 0;
1651
1652 init_completion(&peer->kobj_unregister);
1653 err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj,
1654 "peer_%llu", peer->device_id);
1655 return err;
1656 }
1657
1658 void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer)
1659 {
1660 kobject_del(&peer->kobj);
1661 kobject_put(&peer->kobj);
1662 wait_for_completion(&peer->kobj_unregister);
1663 }
1664
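/*
 * Queue a notify_param for userspace and poke the 'cmd' attribute via
 * sysfs_notify(); a poll()er on that file can then read the event back
 * through sbi_cmd_show().
 */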
1665 void notify(struct hmdfs_peer *node, struct notify_param *param)
1666 {
1667 struct hmdfs_sb_info *sbi = node->sbi;
1668 int in_len;
1669
1670 if (!param)
1671 return;
1672 spin_lock(&sbi->notify_fifo_lock);
1673 in_len =
1674 kfifo_in(&sbi->notify_fifo, param, sizeof(struct notify_param));
1675 spin_unlock(&sbi->notify_fifo_lock);
1676 if (in_len != sizeof(struct notify_param))
1677 return;
1678 sysfs_notify(&sbi->kobj, NULL, "cmd");
1679 }
1680
1681 int hmdfs_sysfs_init(void)
1682 {
1683 hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj);
1684 if (!hmdfs_kset)
1685 return -ENOMEM;
1686
1687 return 0;
1688 }
1689
1690 void hmdfs_sysfs_exit(void)
1691 {
1692 kset_unregister(hmdfs_kset);
1693 hmdfs_kset = NULL;
1694 }
1695