// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/comm/device_node.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "device_node.h"

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include "client_writeback.h"
#include "server_writeback.h"
#include "connection.h"
#include "hmdfs_client.h"
#include "socket_adapter.h"
#include "authority/authentication.h"

DEFINE_MUTEX(hmdfs_sysfs_mutex);
static struct kset *hmdfs_kset;

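/*
 * Control commands are delivered through the per-superblock "cmd" sysfs
 * node as fixed-size binary structures. The leading int selects an entry
 * in cmd_handler[]; each handler checks that the buffer length matches
 * its parameter struct before copying the payload out of the user buffer.
 */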
static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
		struct hmdfs_sb_info *sbi)
{
	struct update_socket_param cmd;
	struct hmdfs_peer *node = NULL;
	struct connection *conn = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("len/buf error");
		goto out;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	if (cmd.status != CONNECT_STAT_WAIT_REQUEST &&
	    cmd.status != CONNECT_STAT_WAIT_RESPONSE) {
		hmdfs_err("invalid status");
		goto out;
	}

	node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
	if (unlikely(!node)) {
		hmdfs_err("failed to update ctrl node: cannot get peer");
		goto out;
	}

	conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
	if (unlikely(!conn)) {
		hmdfs_err("failed to update ctrl node: cannot get conn");
	} else if (!sbi->system_cred) {
		const struct cred *system_cred = get_cred(current_cred());

		if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
			put_cred(system_cred);
		else
			hmdfs_check_cred(system_cred);
	}

	if (conn)
		connection_put(conn);
out:
	if (node)
		peer_put(node);
}

static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
		struct hmdfs_sb_info *sbi)
{
	struct update_devsl_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));

	node = hmdfs_lookup_from_cid(sbi, cmd.cid);
	if (unlikely(!node)) {
		hmdfs_err("failed to update devsl: cannot get peer");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	node->devsl = cmd.devsl;
	peer_put(node);
}

static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
{
	hmdfs_start_process_offline(conn);
	hmdfs_disconnect_node(conn);
	hmdfs_stop_process_offline(conn);
}

static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
		struct hmdfs_sb_info *sbi)
{
	struct offline_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
	if (unlikely(!node)) {
		hmdfs_err("Cannot find node by device");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	hmdfs_disconnect_node_marked(node);
	peer_put(node);
}

typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
		struct hmdfs_sb_info *sbi);

static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
	[CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
	[CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
	[CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
};

static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
		char *buf)
{
	struct notify_param param;
	int out_len;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	memset(&param, 0, sizeof(param));
	spin_lock(&sbi->notify_fifo_lock);
	out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (out_len != sizeof(param))
		param.notify = NOTIFY_NONE;
	memcpy(buf, &param, sizeof(param));
	return sizeof(param);
}

static const char *cmd2str(int cmd)
{
	switch (cmd) {
	case CMD_UPDATE_SOCKET:
		return "CMD_UPDATE_SOCKET";
	case CMD_UPDATE_DEVSL:
		return "CMD_UPDATE_DEVSL";
	case CMD_OFF_LINE:
		return "CMD_OFF_LINE";
	default:
		return "illegal cmd";
	}
}

static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	int cmd;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	if (!sbi) {
		hmdfs_info("Fatal! Empty sbi. Mount fs first");
		return len;
	}
	if (len < sizeof(int)) {
		hmdfs_err("Illegal cmd: cmd len = %zu", len);
		return len;
	}
	cmd = *(int *)buf;
	if (cmd < 0 || cmd >= CMD_CNT) {
		hmdfs_err("Illegal cmd: cmd = %d", cmd);
		return len;
	}
	mutex_lock(&sbi->cmd_handler_mutex);
	hmdfs_info("Received cmd: %s", cmd2str(cmd));
	if (cmd_handler[cmd])
		cmd_handler[cmd](buf, len, sbi);
	mutex_unlock(&sbi->cmd_handler_mutex);
	return len;
}

static struct sbi_attribute sbi_cmd_attr =
	__ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);

static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
		char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	size += sprintf(buf + size, "peers status\n");

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		size += sprintf(buf + size, "%s %d\n", peer->cid,
				peer->status);
		// connection information
		size += sprintf(buf + size,
				"\t socket_fd connection_status tcp_status ... refcnt\n");
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += sprintf(buf + size, "\t %d \t%d \t%d \t%p \t%ld\n",
					tcp->fd, conn_impl->status,
					tcp->sock->state, tcp->sock,
					file_count(tcp->sock->file));
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_status_store(struct kobject *kobj,
		struct sbi_attribute *attr, const char *buf,
		size_t len)
{
	return len;
}

static struct sbi_attribute sbi_status_attr =
	__ATTR(status, 0664, sbi_status_show, sbi_status_store);

static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
		char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += sprintf(buf + size, "socket_fd: %d\n", tcp->fd);
			size += sprintf(buf + size,
					"\tsend_msg %d \tsend_bytes %llu\n",
					conn_impl->stat.send_message_count,
					conn_impl->stat.send_bytes);
			size += sprintf(buf + size,
					"\trecv_msg %d \trecv_bytes %llu\n",
					conn_impl->stat.recv_message_count,
					conn_impl->stat.recv_bytes);
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			conn_impl->stat.send_message_count = 0;
			conn_impl->stat.send_bytes = 0;
			conn_impl->stat.recv_message_count = 0;
			conn_impl->stat.recv_bytes = 0;
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return len;
}

static struct sbi_attribute sbi_statistic_attr =
	__ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);

static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
}

#define PRECISION_MAX 3600000

static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	int ret;
	unsigned int precision;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(skip_spaces(buf), 0, &precision);
	if (!ret) {
		if (precision <= PRECISION_MAX)
			sbi->dcache_precision = precision;
		else
			ret = -EINVAL;
	}

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_precision_attr =
	__ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
	       sbi_dcache_precision_store);

static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n",
			to_sbi(kobj)->dcache_threshold);
}

static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	int ret;
	unsigned long threshold;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtoul(skip_spaces(buf), 0, &threshold);
	if (!ret)
		sbi->dcache_threshold = threshold;

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_threshold_attr =
	__ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
	       sbi_dcache_threshold_store);

static ssize_t server_statistic_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct server_statistic *stat = to_sbi(kobj)->s_server_statis;

	for (i = 0; i < F_SIZE; i++) {
		ret = snprintf(buf + pos, size - pos,
			       "%llu %u %llu %llu\n",
			       stat[i].cnt,
			       jiffies_to_msecs(stat[i].max),
			       stat[i].snd_cnt, stat[i].snd_fail_cnt);
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If we broke out early, terminate the output with a newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}
	return pos;
}

static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);

static ssize_t client_statistic_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct client_statistic *stat = to_sbi(kobj)->s_client_statis;

	for (i = 0; i < F_SIZE; i++) {
		ret = snprintf(buf + pos, size - pos,
			       "%llu %llu %llu %llu %llu %u\n",
			       stat[i].snd_cnt,
			       stat[i].snd_fail_cnt,
			       stat[i].resp_cnt,
			       stat[i].timeout_cnt,
			       stat[i].delay_resp_cnt,
			       jiffies_to_msecs(stat[i].max));
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If we broke out early, terminate the output with a newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}

	return pos;
}

static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);

static inline unsigned long pages_to_kbytes(unsigned long page)
{
	return page << (PAGE_SHIFT - 10);
}

static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_writeback *hwb = sbi->h_wb;
	unsigned long avg;
	unsigned long max;
	unsigned long min;

	spin_lock(&hwb->write_bandwidth_lock);
	avg = hwb->avg_write_bandwidth;
	max = hwb->max_write_bandwidth;
	min = hwb->min_write_bandwidth;
	spin_unlock(&hwb->write_bandwidth_lock);

	if (min == ULONG_MAX)
		min = 0;

	return snprintf(buf, PAGE_SIZE,
			"%10lu\n"
			"%10lu\n"
			"%10lu\n",
			pages_to_kbytes(avg),
			pages_to_kbytes(max),
			pages_to_kbytes(min));
}

static struct sbi_attribute sbi_dirty_writeback_stats_attr =
	__ATTR_RO(dirty_writeback_stats);

static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->wb_timeout_ms);
}

static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int val;
	int err;

	err = kstrtouint(buf, 10, &val);
	if (err)
		return err;

	if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS)
		return -EINVAL;

	sbi->wb_timeout_ms = val;

	return len;
}

static struct sbi_attribute sbi_wb_timeout_ms_attr =
	__ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show,
	       sbi_wb_timeout_ms_store);

static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->dirty_writeback_interval);
}

static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	int err;

	err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval);
	if (err)
		return err;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_centisecs_attr =
	__ATTR(dirty_writeback_centisecs, 0664,
	       sbi_dirty_writeback_centisecs_show,
	       sbi_dirty_writeback_centisecs_store);

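/*
 * The dirty_* byte limits below are interdependent: each store handler
 * clamps the neighbouring limits so that dirty_file_bg_bytes remains the
 * lowest and dirty_fs_bytes the highest of the four, then recomputes the
 * effective thresholds via hmdfs_calculate_dirty_thresh() and
 * hmdfs_update_ratelimit().
 */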
static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->dirty_file_bg_bytes);
}

static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_background_bytes);
	if (err)
		return err;
	if (file_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, file_background_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes);
	sbi->h_wb->dirty_file_bytes =
		max(sbi->h_wb->dirty_file_bytes, file_background_bytes);

	sbi->h_wb->dirty_file_bg_bytes = file_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes);
}

static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_background_bytes);
	if (err)
		return err;
	if (fs_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes);
	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes);

	sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_background_bytes_attr =
	__ATTR(dirty_file_background_bytes, 0644,
	       sbi_dirty_file_background_bytes_show,
	       sbi_dirty_file_background_bytes_store);
static struct sbi_attribute sbi_dirty_fs_background_bytes_attr =
	__ATTR(dirty_fs_background_bytes, 0644,
	       sbi_dirty_fs_background_bytes_show,
	       sbi_dirty_fs_background_bytes_store);

static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes);
}

static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_bytes);
	if (err)
		return err;
	if (file_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, file_bytes);
	sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes);

	sbi->h_wb->dirty_file_bytes = file_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes);
}

static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_bytes);
	if (err)
		return err;
	if (fs_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes);
	sbi->h_wb->dirty_file_bytes =
		min(sbi->h_wb->dirty_file_bytes, fs_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes);

	sbi->h_wb->dirty_fs_bytes = fs_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_bytes_attr =
	__ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show,
	       sbi_dirty_file_bytes_store);
static struct sbi_attribute sbi_dirty_fs_bytes_attr =
	__ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show,
	       sbi_dirty_fs_bytes_store);

static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->writeback_timelimit / HZ);
}

static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int time_limit = 0;
	int err;

	err = kstrtouint(buf, 10, &time_limit);
	if (err)
		return err;
	if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ))
		return -EINVAL;

	sbi->h_wb->writeback_timelimit = time_limit * HZ;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_timelimit_attr =
	__ATTR(dirty_writeback_timelimit, 0644,
	       sbi_dirty_writeback_timelimit_show,
	       sbi_dirty_writeback_timelimit_store);

static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT);
}

static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long bw_thresh_lowerbytes = 0;
	unsigned long bw_thresh_lowerlimit;
	int err;

	err = kstrtoul(buf, 10, &bw_thresh_lowerbytes);
	if (err)
		return err;

	bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE);
	if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT ||
	    bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT)
		return -EINVAL;

	sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit;
	return len;
}

static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr =
	__ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show,
	       sbi_dirty_thresh_lowerlimit_store);

static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_auto_threshold);
}

static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	bool dirty_auto_threshold = false;
	int err;

	err = kstrtobool(buf, &dirty_auto_threshold);
	if (err)
		return err;

	sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_autothresh_attr =
	__ATTR(dirty_writeback_autothresh, 0644,
	       sbi_dirty_writeback_autothresh_show,
	       sbi_dirty_writeback_autothresh_store);

static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_writeback_control);
}

static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int dirty_writeback_control = 0;
	int err;

	err = kstrtouint(buf, 10, &dirty_writeback_control);
	if (err)
		return err;

	sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_control_attr =
	__ATTR(dirty_writeback_control, 0644,
	       sbi_dirty_writeback_control_show,
	       sbi_dirty_writeback_control_store);

static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT);
}

static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	int dirty_thresh_mb;
	unsigned long long pages;
	int err;

	err = kstrtoint(buf, 10, &dirty_thresh_mb);
	if (err)
		return err;

	if (dirty_thresh_mb <= 0)
		return -EINVAL;

	pages = dirty_thresh_mb;
	pages <<= HMDFS_MB_TO_PAGE_SHIFT;
	if (pages > INT_MAX) {
		hmdfs_err("Illegal dirty_thresh_mb %d, its page count exceeds INT_MAX",
			  dirty_thresh_mb);
		return -EINVAL;
	}

	hswb->dirty_thresh_pg = (unsigned int)pages;
	return len;
}

static struct sbi_attribute sbi_srv_dirty_thresh_attr =
	__ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show,
	       sbi_srv_dirty_thresh_store);

static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_writeback_control);
}

static ssize_t sbi_srv_dirty_wb_control_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	bool dirty_writeback_control = true;
	int err;

	err = kstrtobool(buf, &dirty_writeback_control);
	if (err)
		return err;

	hswb->dirty_writeback_control = dirty_writeback_control;

	return len;
}

static struct sbi_attribute sbi_srv_dirty_wb_control_attr =
	__ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show,
	       sbi_srv_dirty_wb_control_store);

static ssize_t sbi_dcache_timeout_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout);
}

static ssize_t sbi_dcache_timeout_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int timeout;
	int err;

	err = kstrtouint(buf, 0, &timeout);
	if (err)
		return err;

	/* zero is invalid, and it doesn't mean no cache */
	if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT)
		return -EINVAL;

	sbi->dcache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_dcache_timeout_attr =
	__ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show,
	       sbi_dcache_timeout_store);

static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n",
			to_sbi(kobj)->write_cache_timeout);
}

static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj,
		struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int timeout;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &timeout);
	if (ret)
		return ret;

	/* setting write_cache_timeout to 0 disables this functionality */
	sbi->write_cache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_write_cache_timeout_sec_attr =
	__ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show,
	       sbi_write_cache_timeout_sec_store);

static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay);
}

static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj,
		struct sbi_attribute *attr,
		const char *buf,
		size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int delay = 0;
	int err;

	err = kstrtouint(buf, 10, &delay);
	if (err)
		return err;

	sbi->async_cb_delay = delay;

	return len;
}

static struct sbi_attribute sbi_node_evt_cb_delay_attr =
	__ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show,
	       sbi_node_evt_cb_delay_store);

static int calc_idr_number(struct idr *idr)
{
	void *entry = NULL;
	int id;
	int number = 0;

	idr_for_each_entry(idr, entry, id) {
		number++;
		if (number % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}

	return number;
}

static ssize_t sbi_show_idr_stats(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf, bool showmsg)
{
	ssize_t size = 0;
	int count;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct idr *idr = NULL;

	sbi = to_sbi(kobj);

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		idr = showmsg ? &peer->msg_idr : &peer->file_id_idr;
		count = calc_idr_number(idr);
		size += snprintf(buf + size, PAGE_SIZE - size,
				 "device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n",
				 peer->device_id, count, idr_get_cursor(idr));
		if (size >= PAGE_SIZE) {
			size = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->connections.node_lock);

	return size;
}

static ssize_t pending_message_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, true);
}

static struct sbi_attribute sbi_pending_message_attr =
	__ATTR_RO(pending_message);

static ssize_t peer_opened_fd_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, false);
}

static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd);

static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj,
		struct sbi_attribute *attr,
		char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active);
}

static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj,
		struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int max_active;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &max_active);
	if (ret)
		return ret;

	sbi->async_req_max_active = max_active;

	return len;
}

static struct sbi_attribute sbi_srv_req_max_active_attr =
	__ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show,
	       sbi_srv_req_max_active_attr_store);

static ssize_t cache_file_show(struct hmdfs_sb_info *sbi,
		struct list_head *head, char *buf)
{
	struct cache_file_node *cfn = NULL;
	ssize_t pos = 0;

	mutex_lock(&sbi->cache_list_lock);
	list_for_each_entry(cfn, head, list) {
		pos += snprintf(buf + pos, PAGE_SIZE - pos,
				"dev_id: %s relative_path: %s\n",
				cfn->cid, cfn->relative_path);
		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->cache_list_lock);

	return pos;
}

static ssize_t client_cache_file_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf);
}

static ssize_t server_cache_file_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf);
}

static struct sbi_attribute sbi_server_cache_file_attr =
	__ATTR_RO(server_cache_file);
static struct sbi_attribute sbi_client_cache_file_attr =
	__ATTR_RO(client_cache_file);

static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr,
		char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->seq);
}

static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq);

static ssize_t peers_sum_attr_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_peer *node = NULL;
	unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0,
		     restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0,
		     rebuild_invalid = 0, rebuild_time = 0;
	unsigned long long stash_ok_pages = 0, stash_fail_pages = 0,
			   restore_ok_pages = 0, restore_fail_pages = 0;

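	/*
	 * node_lock only protects the walk itself: it is dropped while a
	 * peer's statistics are read, with peer_get()/peer_put() pinning
	 * the current node, and re-taken before moving to the next entry.
	 */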
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(node, &sbi->connections.node_list, list) {
		peer_get(node);
		mutex_unlock(&sbi->connections.node_lock);
		stash_ok += node->stats.stash.total_ok;
		stash_fail += node->stats.stash.total_fail;
		stash_ok_pages += node->stats.stash.ok_pages;
		stash_fail_pages += node->stats.stash.fail_pages;
		restore_ok += node->stats.restore.total_ok;
		restore_fail += node->stats.restore.total_fail;
		restore_ok_pages += node->stats.restore.ok_pages;
		restore_fail_pages += node->stats.restore.fail_pages;
		rebuild_ok += node->stats.rebuild.total_ok;
		rebuild_fail += node->stats.rebuild.total_fail;
		rebuild_invalid += node->stats.rebuild.total_invalid;
		rebuild_time += node->stats.rebuild.time;
		peer_put(node);
		mutex_lock(&sbi->connections.node_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);

	return snprintf(buf, PAGE_SIZE,
			"%u %u %llu %llu\n"
			"%u %u %llu %llu\n"
			"%u %u %u %u\n",
			stash_ok, stash_fail, stash_ok_pages, stash_fail_pages,
			restore_ok, restore_fail, restore_ok_pages,
			restore_fail_pages, rebuild_ok, rebuild_fail,
			rebuild_invalid, rebuild_time);
}

static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr);

const char * const flag_name[] = {
	"READPAGES",
	"READPAGES_OPEN",
	"ATOMIC_OPEN",
};

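/*
 * fill_features() prints the set bits of a feature mask, using flag_name[]
 * for the bits that have a name and the raw bit index for the rest.
 */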
static ssize_t fill_features(char *buf, unsigned long long flag)
{
	int i;
	ssize_t pos = 0;
	bool sep = false;
	int flag_name_count = ARRAY_SIZE(flag_name);

	for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) {
		if (!(flag & BIT(i)))
			continue;

		if (sep)
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "|");
		sep = true;

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}

		if (i < flag_name_count && flag_name[i])
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s",
					flag_name[i]);
		else
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i);

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
	if (pos >= PAGE_SIZE)
		pos = PAGE_SIZE;

	return pos;
}

static ssize_t sbi_features_show(struct kobject *kobj,
		struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return fill_features(buf, sbi->s_features);
}

static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444,
		sbi_features_show, NULL);

static struct attribute *sbi_attrs[] = {
	&sbi_cmd_attr.attr,
	&sbi_status_attr.attr,
	&sbi_statistic_attr.attr,
	&sbi_dcache_precision_attr.attr,
	&sbi_dcache_threshold_attr.attr,
	&sbi_dcache_timeout_attr.attr,
	&sbi_write_cache_timeout_sec_attr.attr,
	&sbi_local_op_attr.attr,
	&sbi_delay_resp_attr.attr,
	&sbi_wb_timeout_ms_attr.attr,
	&sbi_dirty_writeback_centisecs_attr.attr,
	&sbi_dirty_file_background_bytes_attr.attr,
	&sbi_dirty_fs_background_bytes_attr.attr,
	&sbi_dirty_file_bytes_attr.attr,
	&sbi_dirty_fs_bytes_attr.attr,
	&sbi_dirty_writeback_autothresh_attr.attr,
	&sbi_dirty_writeback_timelimit_attr.attr,
	&sbi_dirty_thresh_lowerlimit_attr.attr,
	&sbi_dirty_writeback_control_attr.attr,
	&sbi_dirty_writeback_stats_attr.attr,
	&sbi_srv_dirty_thresh_attr.attr,
	&sbi_srv_dirty_wb_control_attr.attr,
	&sbi_node_evt_cb_delay_attr.attr,
	&sbi_srv_req_max_active_attr.attr,
	&sbi_pending_message_attr.attr,
	&sbi_peer_opened_fd_attr.attr,
	&sbi_server_cache_file_attr.attr,
	&sbi_client_cache_file_attr.attr,
	&sbi_seq_attr.attr,
	&sbi_peers_attr.attr,
	&sbi_features_attr.attr,
	NULL,
};

static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->show)
		return -EIO;
	return sbi_attr->show(kobj, sbi_attr, buf);
}

static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr,
		const char *buf, size_t len)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->store)
		return -EIO;
	return sbi_attr->store(kobj, sbi_attr, buf, len);
}

static const struct sysfs_ops sbi_sysfs_ops = {
	.show = sbi_attr_show,
	.store = sbi_attr_store,
};

static void sbi_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	complete(&sbi->s_kobj_unregister);
}

static struct kobj_type sbi_ktype = {
	.sysfs_ops = &sbi_sysfs_ops,
	.default_attrs = sbi_attrs,
	.release = sbi_release,
};

static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x)
{
	return container_of(x, struct sbi_cmd_attribute, attr);
}

static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x)
{
	return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj);
}

static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	int cmd = to_sbi_cmd_attr(attr)->command;
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return 0;

	return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd));
}

static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr,
		const char *buf, size_t len)
{
	unsigned int value;
	int cmd = to_sbi_cmd_attr(attr)->command;
	int ret = kstrtouint(skip_spaces(buf), 0, &value);
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return -EINVAL;

	if (!ret)
		set_cmd_timeout(sbi, cmd, value);

	return ret ? ret : len;
}

#define HMDFS_CMD_ATTR(_name, _cmd) \
	static struct sbi_cmd_attribute hmdfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0664 }, \
		.command = (_cmd), \
	}

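/*
 * Each HMDFS_CMD_ATTR() instance becomes one file under the per-superblock
 * "cmd_timeout" kobject, exposing the timeout of a single remote command
 * through cmd_timeout_show()/cmd_timeout_store().
 */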
HMDFS_CMD_ATTR(open, F_OPEN);
HMDFS_CMD_ATTR(release, F_RELEASE);
HMDFS_CMD_ATTR(readpage, F_READPAGE);
HMDFS_CMD_ATTR(writepage, F_WRITEPAGE);
HMDFS_CMD_ATTR(iterate, F_ITERATE);
HMDFS_CMD_ATTR(rmdir, F_RMDIR);
HMDFS_CMD_ATTR(unlink, F_UNLINK);
HMDFS_CMD_ATTR(rename, F_RENAME);
HMDFS_CMD_ATTR(setattr, F_SETATTR);
HMDFS_CMD_ATTR(statfs, F_STATFS);
HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH);
HMDFS_CMD_ATTR(getattr, F_GETATTR);
HMDFS_CMD_ATTR(fsync, F_FSYNC);
HMDFS_CMD_ATTR(syncfs, F_SYNCFS);
HMDFS_CMD_ATTR(getxattr, F_GETXATTR);
HMDFS_CMD_ATTR(setxattr, F_SETXATTR);
HMDFS_CMD_ATTR(listxattr, F_LISTXATTR);

#define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr)

static struct attribute *sbi_timeout_attrs[] = {
	ATTR_LIST(open), ATTR_LIST(release),
	ATTR_LIST(readpage), ATTR_LIST(writepage),
	ATTR_LIST(iterate), ATTR_LIST(rmdir),
	ATTR_LIST(unlink), ATTR_LIST(rename),
	ATTR_LIST(setattr),
	ATTR_LIST(statfs), ATTR_LIST(drop_push),
	ATTR_LIST(getattr), ATTR_LIST(fsync),
	ATTR_LIST(syncfs), ATTR_LIST(getxattr),
	ATTR_LIST(setxattr), ATTR_LIST(listxattr),
	NULL
};

static const struct sysfs_ops sbi_cmd_sysfs_ops = {
	.show = cmd_timeout_show,
	.store = cmd_timeout_store,
};

static void sbi_timeout_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info,
						 s_cmd_timeout_kobj);

	complete(&sbi->s_timeout_kobj_unregister);
}

static struct kobj_type sbi_timeout_ktype = {
	.sysfs_ops = &sbi_cmd_sysfs_ops,
	.default_attrs = sbi_timeout_attrs,
	.release = sbi_timeout_release,
};

void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_put(&sbi->s_cmd_timeout_kobj);
	wait_for_completion(&sbi->s_timeout_kobj_unregister);
	kobject_put(&sbi->kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
}

int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi)
{
	int ret;
	struct kobject *kobj = NULL;

	mutex_lock(&hmdfs_sysfs_mutex);
	kobj = kset_find_obj(hmdfs_kset, name);
	if (kobj) {
		hmdfs_err("mount failed, already exists");
		kobject_put(kobj);
		mutex_unlock(&hmdfs_sysfs_mutex);
		return -EEXIST;
	}

	sbi->kobj.kset = hmdfs_kset;
	init_completion(&sbi->s_kobj_unregister);
	ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype,
				   &hmdfs_kset->kobj, "%s", name);
	sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000), KGIDT_INIT(1000));
	mutex_unlock(&hmdfs_sysfs_mutex);

	if (ret) {
		kobject_put(&sbi->kobj);
		wait_for_completion(&sbi->s_kobj_unregister);
		return ret;
	}

	init_completion(&sbi->s_timeout_kobj_unregister);
	ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype,
				   &sbi->kobj, "cmd_timeout");
	if (ret) {
		hmdfs_release_sysfs(sbi);
		return ret;
	}

	kobject_uevent(&sbi->kobj, KOBJ_ADD);
	return 0;
}

void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_del(&sbi->s_cmd_timeout_kobj);
	kobject_del(&sbi->kobj);
}

static inline int to_sysfs_fmt_evt(unsigned int evt)
{
	return evt == RAW_NODE_EVT_NR ? -1 : evt;
}

static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr,
		char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return fill_features(buf, peer->features);
}

static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr,
		char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_async evt %d seq %u\n"
			"cur_sync evt %d seq %u\n"
			"pending evt %d seq %u\n"
			"merged evt %u\n"
			"dup_drop evt %u %u\n"
			"waiting evt %u %u\n"
			"seq_tbl %u %u %u %u\n"
			"seq_rd_idx %u\n"
			"seq_wr_idx %u\n",
			to_sysfs_fmt_evt(peer->cur_evt[0]),
			peer->cur_evt_seq[0],
			to_sysfs_fmt_evt(peer->cur_evt[1]),
			peer->cur_evt_seq[1],
			to_sysfs_fmt_evt(peer->pending_evt),
			peer->pending_evt_seq,
			peer->merged_evt,
			peer->dup_evt[RAW_NODE_EVT_OFF],
			peer->dup_evt[RAW_NODE_EVT_ON],
			peer->waiting_evt[RAW_NODE_EVT_OFF],
			peer->waiting_evt[RAW_NODE_EVT_ON],
			peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2],
			peer->seq_tbl[3],
			peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR,
			peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR);
}

static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr,
		char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_nothing %u\n"
			"cur_fail %u\n"
			"total_ok %u\n"
			"total_nothing %u\n"
			"total_fail %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.stash.cur_ok,
			peer->stats.stash.cur_nothing,
			peer->stats.stash.cur_fail,
			peer->stats.stash.total_ok,
			peer->stats.stash.total_nothing,
			peer->stats.stash.total_fail,
			peer->stats.stash.ok_pages,
			peer->stats.stash.fail_pages);
}

static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr,
		char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_keep %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_keep %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.restore.cur_ok,
			peer->stats.restore.cur_fail,
			peer->stats.restore.cur_keep,
			peer->stats.restore.total_ok,
			peer->stats.restore.total_fail,
			peer->stats.restore.total_keep,
			peer->stats.restore.ok_pages,
			peer->stats.restore.fail_pages);
}

static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr,
		char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_invalid %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_invalid %u\n"
			"time %u\n",
			peer->stats.rebuild.cur_ok,
			peer->stats.rebuild.cur_fail,
			peer->stats.rebuild.cur_invalid,
			peer->stats.rebuild.total_ok,
			peer->stats.rebuild.total_fail,
			peer->stats.rebuild.total_invalid,
			peer->stats.rebuild.time);
}

static struct peer_attribute peer_features_attr = __ATTR_RO(features);
static struct peer_attribute peer_event_attr = __ATTR_RO(event);
static struct peer_attribute peer_stash_attr = __ATTR_RO(stash);
static struct peer_attribute peer_restore_attr = __ATTR_RO(restore);
static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild);

static struct attribute *peer_attrs[] = {
	&peer_features_attr.attr,
	&peer_event_attr.attr,
	&peer_stash_attr.attr,
	&peer_restore_attr.attr,
	&peer_rebuild_attr.attr,
	NULL,
};

static ssize_t peer_attr_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->show)
		return -EIO;
	return peer_attr->show(kobj, peer_attr, buf);
}

static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr,
		const char *buf, size_t len)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->store)
		return -EIO;
	return peer_attr->store(kobj, peer_attr, buf, len);
}

static const struct sysfs_ops peer_sysfs_ops = {
	.show = peer_attr_show,
	.store = peer_attr_store,
};

static void peer_sysfs_release(struct kobject *kobj)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	complete(&peer->kobj_unregister);
}

static struct kobj_type peer_ktype = {
	.sysfs_ops = &peer_sysfs_ops,
	.default_attrs = peer_attrs,
	.release = peer_sysfs_release,
};

int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi,
		struct hmdfs_peer *peer)
{
	int err = 0;

	init_completion(&peer->kobj_unregister);
	err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj,
				   "peer_%llu", peer->device_id);
	return err;
}

void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer)
{
	kobject_del(&peer->kobj);
	kobject_put(&peer->kobj);
	wait_for_completion(&peer->kobj_unregister);
}

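/*
 * notify() is the kernel-to-userspace half of the "cmd" node: the parameter
 * block is queued into the notify kfifo and sysfs_notify() wakes any poller
 * on "cmd", which then reads the block back through sbi_cmd_show().
 */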
void notify(struct hmdfs_peer *node, struct notify_param *param)
{
	struct hmdfs_sb_info *sbi = node->sbi;
	int in_len;

	if (!param)
		return;
	spin_lock(&sbi->notify_fifo_lock);
	in_len = kfifo_in(&sbi->notify_fifo, param,
			  sizeof(struct notify_param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (in_len != sizeof(struct notify_param))
		return;
	sysfs_notify(&sbi->kobj, NULL, "cmd");
}

int hmdfs_sysfs_init(void)
{
	hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj);
	if (!hmdfs_kset)
		return -ENOMEM;

	return 0;
}

void hmdfs_sysfs_exit(void)
{
	kset_unregister(hmdfs_kset);
	hmdfs_kset = NULL;
}