// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/comm/device_node.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "device_node.h"

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include "client_writeback.h"
#include "server_writeback.h"
#include "connection.h"
#include "hmdfs_client.h"
#include "socket_adapter.h"
#include "authority/authentication.h"

DEFINE_MUTEX(hmdfs_sysfs_mutex);
static struct kset *hmdfs_kset;

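/*
 * Control command handlers: userspace writes a binary command struct to the
 * per-sb "cmd" sysfs attribute (see sbi_cmd_store() below), and the matching
 * handler here is invoked under sbi->cmd_handler_mutex.
 */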
static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
					   struct hmdfs_sb_info *sbi)
{
	struct update_socket_param cmd;
	struct hmdfs_peer *node = NULL;
	struct connection *conn = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("len/buf error");
		goto out;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	if (cmd.status != CONNECT_STAT_WAIT_REQUEST &&
	    cmd.status != CONNECT_STAT_WAIT_RESPONSE) {
		hmdfs_err("invalid status");
		goto out;
	}

	node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
	if (unlikely(!node)) {
		hmdfs_err("failed to update ctrl node: cannot get peer");
		goto out;
	}

	conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
	if (unlikely(!conn)) {
		hmdfs_err("failed to update ctrl node: cannot get conn");
	} else if (!sbi->system_cred) {
		const struct cred *system_cred = get_cred(current_cred());

		if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
			put_cred(system_cred);
		else
			hmdfs_check_cred(system_cred);
	}

	if (conn)
		connection_put(conn);
out:
	if (node)
		peer_put(node);
}

static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
					  struct hmdfs_sb_info *sbi)
{
	struct update_devsl_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));

	node = hmdfs_lookup_from_cid(sbi, cmd.cid);
	if (unlikely(!node)) {
		hmdfs_err("failed to update devsl: cannot get peer");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	node->devsl = cmd.devsl;
	peer_put(node);
}

static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
{
	hmdfs_start_process_offline(conn);
	hmdfs_disconnect_node(conn);
	hmdfs_stop_process_offline(conn);
}

static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
				      struct hmdfs_sb_info *sbi)
{
	struct offline_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
	if (unlikely(!node)) {
		hmdfs_err("Cannot find node by device");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	hmdfs_disconnect_node_marked(node);
	peer_put(node);
}

typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
				 struct hmdfs_sb_info *sbi);

static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
	[CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
	[CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
	[CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
};

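/*
 * Reading "cmd" pops one queued notify_param from sbi->notify_fifo (filled by
 * notify() below); an empty fifo is reported as NOTIFY_NONE.
 */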
static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
			    char *buf)
{
	struct notify_param param;
	int out_len;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	memset(&param, 0, sizeof(param));
	spin_lock(&sbi->notify_fifo_lock);
	out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (out_len != sizeof(param))
		param.notify = NOTIFY_NONE;
	memcpy(buf, &param, sizeof(param));
	return sizeof(param);
}

static const char *cmd2str(int cmd)
{
	switch (cmd) {
	case 0:
		return "CMD_UPDATE_SOCKET";
	case 1:
		return "CMD_UPDATE_DEVSL";
	case 2:
		return "CMD_OFF_LINE";
	default:
		return "illegal cmd";
	}
}

static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
			     const char *buf, size_t len)
{
	int cmd;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	if (!sbi) {
		hmdfs_info("Fatal! Empty sbi. Mount fs first");
		return len;
	}
	if (len < sizeof(int)) {
		hmdfs_err("Illegal cmd: cmd len = %zu", len);
		return len;
	}
	cmd = *(int *)buf;
	if (cmd < 0 || cmd >= CMD_CNT) {
		hmdfs_err("Illegal cmd : cmd = %d", cmd);
		return len;
	}
	mutex_lock(&sbi->cmd_handler_mutex);
	hmdfs_info("Received cmd: %s", cmd2str(cmd));
	if (cmd_handler[cmd])
		cmd_handler[cmd](buf, len, sbi);
	mutex_unlock(&sbi->cmd_handler_mutex);
	return len;
}

static struct sbi_attribute sbi_cmd_attr =
	__ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);

static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
			       char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	size += snprintf(buf + size, PAGE_SIZE - size, "peers status\n");

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		size += snprintf(buf + size, PAGE_SIZE - size, "%llu %d\n",
				 peer->device_id, peer->status);
		// connection information
		size += snprintf(
			buf + size, PAGE_SIZE - size,
			"\t socket_fd connection_status tcp_status ... refcnt\n");
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += snprintf(buf + size, PAGE_SIZE - size,
					 "\t %d \t%d \t%d \t%p \t%ld\n",
					 tcp->fd, conn_impl->status,
					 tcp->sock->state, tcp->sock, file_count(tcp->sock->file));
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_status_store(struct kobject *kobj,
				struct sbi_attribute *attr, const char *buf,
				size_t len)
{
	return len;
}

static struct sbi_attribute sbi_status_attr =
	__ATTR(status, 0664, sbi_status_show, sbi_status_store);

static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
			     char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += snprintf(buf + size, PAGE_SIZE - size,
					 "socket_fd: %d\n", tcp->fd);
			size += snprintf(buf + size, PAGE_SIZE - size,
					 "\tsend_msg %d \tsend_bytes %llu\n",
					 conn_impl->stat.send_message_count,
					 conn_impl->stat.send_bytes);
			size += snprintf(buf + size, PAGE_SIZE - size,
					 "\trecv_msg %d \trecv_bytes %llu\n",
					 conn_impl->stat.recv_message_count,
					 conn_impl->stat.recv_bytes);
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
			      const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			conn_impl->stat.send_message_count = 0;
			conn_impl->stat.send_bytes = 0;
			conn_impl->stat.recv_message_count = 0;
			conn_impl->stat.recv_bytes = 0;
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return len;
}

static struct sbi_attribute sbi_statistic_attr =
	__ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);

static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
}

#define PRECISION_MAX 3600000

static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	int ret;
	unsigned int precision;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(skip_spaces(buf), 0, &precision);
	if (!ret) {
		if (precision <= PRECISION_MAX)
			sbi->dcache_precision = precision;
		else
			ret = -EINVAL;
	}

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_precision_attr =
	__ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
	       sbi_dcache_precision_store);

static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n",
			to_sbi(kobj)->dcache_threshold);
}

static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	int ret;
	unsigned long threshold;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtoul(skip_spaces(buf), 0, &threshold);
	if (!ret)
		sbi->dcache_threshold = threshold;

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_threshold_attr =
	__ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
	       sbi_dcache_threshold_store);

static ssize_t server_statistic_show(struct kobject *kobj,
				     struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct server_statistic *stat = to_sbi(kobj)->s_server_statis;

	for (i = 0; i < F_SIZE; i++) {

		ret = snprintf(buf + pos, size - pos,
			       "%llu %u %llu %llu\n",
			       stat[i].cnt,
			       jiffies_to_msecs(stat[i].max),
			       stat[i].snd_cnt, stat[i].snd_fail_cnt);
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If we broke out of the loop early, append a trailing newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}
	return pos;
}

static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);

static ssize_t client_statistic_show(struct kobject *kobj,
				     struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct client_statistic *stat = to_sbi(kobj)->s_client_statis;

	for (i = 0; i < F_SIZE; i++) {

		ret = snprintf(buf + pos, size - pos,
			       "%llu %llu %llu %llu %llu %u\n",
			       stat[i].snd_cnt,
			       stat[i].snd_fail_cnt,
			       stat[i].resp_cnt,
			       stat[i].timeout_cnt,
			       stat[i].delay_resp_cnt,
			       jiffies_to_msecs(stat[i].max));
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If we broke out of the loop early, append a trailing newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}

	return pos;
}

static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);

static inline unsigned long pages_to_kbytes(unsigned long page)
{
	return page << (PAGE_SHIFT - 10);
}

static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_writeback *hwb = sbi->h_wb;
	unsigned long avg;
	unsigned long max;
	unsigned long min;

	spin_lock(&hwb->write_bandwidth_lock);
	avg = hwb->avg_write_bandwidth;
	max = hwb->max_write_bandwidth;
	min = hwb->min_write_bandwidth;
	spin_unlock(&hwb->write_bandwidth_lock);

	if (min == ULONG_MAX)
		min = 0;

	return snprintf(buf, PAGE_SIZE,
			"%10lu\n"
			"%10lu\n"
			"%10lu\n",
			pages_to_kbytes(avg),
			pages_to_kbytes(max),
			pages_to_kbytes(min));
}

static struct sbi_attribute sbi_dirty_writeback_stats_attr =
	__ATTR_RO(dirty_writeback_stats);

static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
				      struct sbi_attribute *attr,
				      char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->wb_timeout_ms);
}

static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj,
				       struct sbi_attribute *attr,
				       const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int val;
	int err;

	err = kstrtouint(buf, 10, &val);
	if (err)
		return err;

	if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS)
		return -EINVAL;

	sbi->wb_timeout_ms = val;

	return len;
}

static struct sbi_attribute sbi_wb_timeout_ms_attr =
	__ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show,
	       sbi_wb_timeout_ms_store);

static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->dirty_writeback_interval);
}

static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	int err;

	err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval);
	if (err)
		return err;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_centisecs_attr =
	__ATTR(dirty_writeback_centisecs, 0664,
	       sbi_dirty_writeback_centisecs_show,
	       sbi_dirty_writeback_centisecs_store);

static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj,
						    struct sbi_attribute *attr,
						    char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->dirty_file_bg_bytes);
}

static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj,
						     struct sbi_attribute *attr,
						     const char *buf,
						     size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_background_bytes);
	if (err)
		return err;
	if (file_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, file_background_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes);
	sbi->h_wb->dirty_file_bytes =
		max(sbi->h_wb->dirty_file_bytes, file_background_bytes);

	sbi->h_wb->dirty_file_bg_bytes = file_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes);
}

static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_background_bytes);
	if (err)
		return err;
	if (fs_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes);
	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes);

	sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_background_bytes_attr =
	__ATTR(dirty_file_background_bytes, 0644,
	       sbi_dirty_file_background_bytes_show,
	       sbi_dirty_file_background_bytes_store);
static struct sbi_attribute sbi_dirty_fs_background_bytes_attr =
	__ATTR(dirty_fs_background_bytes, 0644,
	       sbi_dirty_fs_background_bytes_show,
	       sbi_dirty_fs_background_bytes_store);

static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes);
}

static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_bytes);
	if (err)
		return err;
	if (file_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, file_bytes);
	sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes);

	sbi->h_wb->dirty_file_bytes = file_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj,
				       struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes);
}

static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj,
					struct sbi_attribute *attr,
					const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_bytes);
	if (err)
		return err;
	if (fs_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes);
	sbi->h_wb->dirty_file_bytes =
		min(sbi->h_wb->dirty_file_bytes, fs_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes);

	sbi->h_wb->dirty_fs_bytes = fs_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_bytes_attr =
	__ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show,
	       sbi_dirty_file_bytes_store);
static struct sbi_attribute sbi_dirty_fs_bytes_attr =
	__ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show,
	       sbi_dirty_fs_bytes_store);

static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->writeback_timelimit / HZ);
}

static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf,
						   size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int time_limit = 0;
	int err;

	err = kstrtouint(buf, 10, &time_limit);
	if (err)
		return err;
	if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ))
		return -EINVAL;

	sbi->h_wb->writeback_timelimit = time_limit * HZ;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_timelimit_attr =
	__ATTR(dirty_writeback_timelimit, 0644, sbi_dirty_writeback_timelimit_show,
	       sbi_dirty_writeback_timelimit_store);

static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT);
}

static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj,
						 struct sbi_attribute *attr,
						 const char *buf,
						 size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long bw_thresh_lowerbytes = 0;
	unsigned long bw_thresh_lowerlimit;
	int err;

	err = kstrtoul(buf, 10, &bw_thresh_lowerbytes);
	if (err)
		return err;

	bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE);
	if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT ||
	    bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT)
		return -EINVAL;

	sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit;
	return len;
}

static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr =
	__ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show,
	       sbi_dirty_thresh_lowerlimit_store);

static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_auto_threshold);
}

static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj,
						    struct sbi_attribute *attr,
						    const char *buf,
						    size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	bool dirty_auto_threshold = false;
	int err;

	err = kstrtobool(buf, &dirty_auto_threshold);
	if (err)
		return err;

	sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_autothresh_attr =
	__ATTR(dirty_writeback_autothresh, 0644, sbi_dirty_writeback_autothresh_show,
	       sbi_dirty_writeback_autothresh_store);

static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_writeback_control);
}

static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj,
						 struct sbi_attribute *attr,
						 const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int dirty_writeback_control = 0;
	int err;

	err = kstrtouint(buf, 10, &dirty_writeback_control);
	if (err)
		return err;

	sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_control_attr =
	__ATTR(dirty_writeback_control, 0644, sbi_dirty_writeback_control_show,
	       sbi_dirty_writeback_control_store);

static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj,
					 struct sbi_attribute *attr,
					 char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT);
}

static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	int dirty_thresh_mb;
	unsigned long long pages;
	int err;

	err = kstrtoint(buf, 10, &dirty_thresh_mb);
	if (err)
		return err;

	if (dirty_thresh_mb <= 0)
		return -EINVAL;

	pages = dirty_thresh_mb;
	pages <<= HMDFS_MB_TO_PAGE_SHIFT;
	if (pages > INT_MAX) {
		hmdfs_err("Illegal dirty_thresh_mb %d, its page count exceeds INT_MAX",
			  dirty_thresh_mb);
		return -EINVAL;
	}

	hswb->dirty_thresh_pg = (unsigned int)pages;
	return len;
}

static struct sbi_attribute sbi_srv_dirty_thresh_attr =
	__ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show,
	       sbi_srv_dirty_thresh_store);


static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj,
					     struct sbi_attribute *attr,
					     char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_writeback_control);
}

static ssize_t sbi_srv_dirty_wb_control_store(struct kobject *kobj,
					      struct sbi_attribute *attr,
					      const char *buf,
					      size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	bool dirty_writeback_control = true;
	int err;

	err = kstrtobool(buf, &dirty_writeback_control);
	if (err)
		return err;

	hswb->dirty_writeback_control = dirty_writeback_control;

	return len;
}

static struct sbi_attribute sbi_srv_dirty_wb_control_attr =
	__ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show,
	       sbi_srv_dirty_wb_control_store);

static ssize_t sbi_dcache_timeout_show(struct kobject *kobj,
				       struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout);
}

static ssize_t sbi_dcache_timeout_store(struct kobject *kobj,
					struct sbi_attribute *attr,
					const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int timeout;
	int err;

	err = kstrtouint(buf, 0, &timeout);
	if (err)
		return err;

	/* zero is invalid, and it doesn't mean no cache */
	if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT)
		return -EINVAL;

	sbi->dcache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_dcache_timeout_attr =
	__ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show,
	       sbi_dcache_timeout_store);

static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj,
						struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n",
			to_sbi(kobj)->write_cache_timeout);
}

static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj,
						 struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int timeout;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &timeout);
	if (ret)
		return ret;

	/* setting write_cache_timeout to 0 disables this functionality */
	sbi->write_cache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_write_cache_timeout_sec_attr =
	__ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show,
	       sbi_write_cache_timeout_sec_store);

static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay);
}

static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj,
					   struct sbi_attribute *attr,
					   const char *buf,
					   size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int delay = 0;
	int err;

	err = kstrtouint(buf, 10, &delay);
	if (err)
		return err;

	sbi->async_cb_delay = delay;

	return len;
}

static struct sbi_attribute sbi_node_evt_cb_delay_attr =
	__ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show,
	       sbi_node_evt_cb_delay_store);

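/*
 * Count the entries allocated in an idr, rescheduling periodically so that
 * walking a large tree does not hog the CPU.
 */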
static int calc_idr_number(struct idr *idr)
{
	void *entry = NULL;
	int id;
	int number = 0;

	idr_for_each_entry(idr, entry, id) {
		number++;
		if (number % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}

	return number;
}

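/*
 * Dump either the pending-message idr or the opened-fd idr of every peer,
 * one "device-id / count / next-id" record per peer.
 */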
static ssize_t sbi_show_idr_stats(struct kobject *kobj,
				  struct sbi_attribute *attr,
				  char *buf, bool showmsg)
{
	ssize_t size = 0;
	int count;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct idr *idr = NULL;

	sbi = to_sbi(kobj);

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		idr = showmsg ? &peer->msg_idr : &peer->file_id_idr;
		count = calc_idr_number(idr);
		size += snprintf(buf + size, PAGE_SIZE - size,
				 "device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n",
				 peer->device_id, count, idr_get_cursor(idr));
		if (size >= PAGE_SIZE) {
			size = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->connections.node_lock);

	return size;
}

static ssize_t pending_message_show(struct kobject *kobj,
				    struct sbi_attribute *attr,
				    char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, true);
}

static struct sbi_attribute sbi_pending_message_attr =
	__ATTR_RO(pending_message);

static ssize_t peer_opened_fd_show(struct kobject *kobj,
				   struct sbi_attribute *attr, char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, false);
}

static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd);

static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active);
}

static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj,
						 struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int max_active;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &max_active);
	if (ret)
		return ret;

	sbi->async_req_max_active = max_active;

	return len;
}

static struct sbi_attribute sbi_srv_req_max_active_attr =
	__ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show,
	       sbi_srv_req_max_active_attr_store);


static ssize_t cache_file_show(struct hmdfs_sb_info *sbi,
			       struct list_head *head, char *buf)
{
	struct cache_file_node *cfn = NULL;
	ssize_t pos = 0;

	mutex_lock(&sbi->cache_list_lock);
	list_for_each_entry(cfn, head, list) {
		pos += snprintf(buf + pos, PAGE_SIZE - pos,
				"dev_id: %s relative_path: %s\n",
				cfn->cid, cfn->relative_path);
		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->cache_list_lock);

	return pos;
}

static ssize_t client_cache_file_show(struct kobject *kobj,
				      struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf);
}
static ssize_t server_cache_file_show(struct kobject *kobj,
				      struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf);
}

static struct sbi_attribute sbi_server_cache_file_attr =
	__ATTR_RO(server_cache_file);
static struct sbi_attribute sbi_client_cache_file_attr =
	__ATTR_RO(client_cache_file);

static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr,
			   char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->seq);
}

static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq);

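/*
 * Sum stash/restore/rebuild statistics over all peers. connections.node_lock
 * is dropped while each peer's counters are read; a temporary reference
 * (peer_get()/peer_put()) keeps the peer valid in the meantime.
 */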
static ssize_t peers_sum_attr_show(struct kobject *kobj,
				   struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_peer *node = NULL;
	unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0,
		     restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0, rebuild_invalid = 0,
		     rebuild_time = 0;
	unsigned long long stash_ok_pages = 0, stash_fail_pages = 0,
			   restore_ok_pages = 0, restore_fail_pages = 0;

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(node, &sbi->connections.node_list, list) {
		peer_get(node);
		mutex_unlock(&sbi->connections.node_lock);
		stash_ok += node->stats.stash.total_ok;
		stash_fail += node->stats.stash.total_fail;
		stash_ok_pages += node->stats.stash.ok_pages;
		stash_fail_pages += node->stats.stash.fail_pages;
		restore_ok += node->stats.restore.total_ok;
		restore_fail += node->stats.restore.total_fail;
		restore_ok_pages += node->stats.restore.ok_pages;
		restore_fail_pages += node->stats.restore.fail_pages;
		rebuild_ok += node->stats.rebuild.total_ok;
		rebuild_fail += node->stats.rebuild.total_fail;
		rebuild_invalid += node->stats.rebuild.total_invalid;
		rebuild_time += node->stats.rebuild.time;
		peer_put(node);
		mutex_lock(&sbi->connections.node_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);

	return snprintf(buf, PAGE_SIZE,
			"%u %u %llu %llu\n"
			"%u %u %llu %llu\n"
			"%u %u %u %u\n",
			stash_ok, stash_fail, stash_ok_pages, stash_fail_pages,
			restore_ok, restore_fail, restore_ok_pages,
			restore_fail_pages, rebuild_ok, rebuild_fail,
			rebuild_invalid, rebuild_time);
}

static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr);

const char * const flag_name[] = {
	"READPAGES",
	"READPAGES_OPEN",
	"ATOMIC_OPEN",
};

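/*
 * Print the feature bits set in @flag, using the names in flag_name[] where
 * available and the raw bit index otherwise, separated by '|'.
 */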
static ssize_t fill_features(char *buf, unsigned long long flag)
{
	int i;
	ssize_t pos = 0;
	bool sep = false;
	int flag_name_count = ARRAY_SIZE(flag_name);

	for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) {
		if (!(flag & BIT(i)))
			continue;

		if (sep)
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "|");
		sep = true;

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}

		if (i < flag_name_count && flag_name[i])
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s",
					flag_name[i]);
		else
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i);

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
	if (pos >= PAGE_SIZE)
		pos = PAGE_SIZE;

	return pos;
}

static ssize_t sbi_features_show(struct kobject *kobj,
				 struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return fill_features(buf, sbi->s_features);
}

static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444,
	sbi_features_show, NULL);

static struct attribute *sbi_attrs[] = {
	&sbi_cmd_attr.attr,
	&sbi_status_attr.attr,
	&sbi_statistic_attr.attr,
	&sbi_dcache_precision_attr.attr,
	&sbi_dcache_threshold_attr.attr,
	&sbi_dcache_timeout_attr.attr,
	&sbi_write_cache_timeout_sec_attr.attr,
	&sbi_local_op_attr.attr,
	&sbi_delay_resp_attr.attr,
	&sbi_wb_timeout_ms_attr.attr,
	&sbi_dirty_writeback_centisecs_attr.attr,
	&sbi_dirty_file_background_bytes_attr.attr,
	&sbi_dirty_fs_background_bytes_attr.attr,
	&sbi_dirty_file_bytes_attr.attr,
	&sbi_dirty_fs_bytes_attr.attr,
	&sbi_dirty_writeback_autothresh_attr.attr,
	&sbi_dirty_writeback_timelimit_attr.attr,
	&sbi_dirty_thresh_lowerlimit_attr.attr,
	&sbi_dirty_writeback_control_attr.attr,
	&sbi_dirty_writeback_stats_attr.attr,
	&sbi_srv_dirty_thresh_attr.attr,
	&sbi_srv_dirty_wb_control_attr.attr,
	&sbi_node_evt_cb_delay_attr.attr,
	&sbi_srv_req_max_active_attr.attr,
	&sbi_pending_message_attr.attr,
	&sbi_peer_opened_fd_attr.attr,
	&sbi_server_cache_file_attr.attr,
	&sbi_client_cache_file_attr.attr,
	&sbi_seq_attr.attr,
	&sbi_peers_attr.attr,
	&sbi_features_attr.attr,
	NULL,
};

static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->show)
		return -EIO;
	return sbi_attr->show(kobj, sbi_attr, buf);
}

static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->store)
		return -EIO;
	return sbi_attr->store(kobj, sbi_attr, buf, len);
}

static const struct sysfs_ops sbi_sysfs_ops = {
	.show = sbi_attr_show,
	.store = sbi_attr_store,
};

static void sbi_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	complete(&sbi->s_kobj_unregister);
}

static struct kobj_type sbi_ktype = {
	.sysfs_ops = &sbi_sysfs_ops,
	.default_attrs = sbi_attrs,
	.release = sbi_release,
};

static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x)
{
	return container_of(x, struct sbi_cmd_attribute, attr);
}

static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x)
{
	return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj);
}

static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	int cmd = to_sbi_cmd_attr(attr)->command;
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return 0;

	return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd));
}

static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t len)
{
	unsigned int value;
	int cmd = to_sbi_cmd_attr(attr)->command;
	int ret = kstrtouint(skip_spaces(buf), 0, &value);
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return -EINVAL;

	if (!ret)
		set_cmd_timeout(sbi, cmd, value);

	return ret ? ret : len;
}

#define HMDFS_CMD_ATTR(_name, _cmd)                                    \
	static struct sbi_cmd_attribute hmdfs_attr_##_name = {        \
		.attr = { .name = __stringify(_name), .mode = 0664 }, \
		.command = (_cmd),                                     \
	}

HMDFS_CMD_ATTR(open, F_OPEN);
HMDFS_CMD_ATTR(release, F_RELEASE);
HMDFS_CMD_ATTR(readpage, F_READPAGE);
HMDFS_CMD_ATTR(writepage, F_WRITEPAGE);
HMDFS_CMD_ATTR(iterate, F_ITERATE);
HMDFS_CMD_ATTR(rmdir, F_RMDIR);
HMDFS_CMD_ATTR(unlink, F_UNLINK);
HMDFS_CMD_ATTR(rename, F_RENAME);
HMDFS_CMD_ATTR(setattr, F_SETATTR);
HMDFS_CMD_ATTR(statfs, F_STATFS);
HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH);
HMDFS_CMD_ATTR(getattr, F_GETATTR);
HMDFS_CMD_ATTR(fsync, F_FSYNC);
HMDFS_CMD_ATTR(syncfs, F_SYNCFS);
HMDFS_CMD_ATTR(getxattr, F_GETXATTR);
HMDFS_CMD_ATTR(setxattr, F_SETXATTR);
HMDFS_CMD_ATTR(listxattr, F_LISTXATTR);

#define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr)

static struct attribute *sbi_timeout_attrs[] = {
	ATTR_LIST(open), ATTR_LIST(release),
	ATTR_LIST(readpage), ATTR_LIST(writepage),
	ATTR_LIST(iterate), ATTR_LIST(rmdir),
	ATTR_LIST(unlink), ATTR_LIST(rename),
	ATTR_LIST(setattr),
	ATTR_LIST(statfs), ATTR_LIST(drop_push),
	ATTR_LIST(getattr), ATTR_LIST(fsync),
	ATTR_LIST(syncfs), ATTR_LIST(getxattr),
	ATTR_LIST(setxattr), ATTR_LIST(listxattr),
	NULL
};

static const struct sysfs_ops sbi_cmd_sysfs_ops = {
	.show = cmd_timeout_show,
	.store = cmd_timeout_store,
};

static void sbi_timeout_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info,
						 s_cmd_timeout_kobj);

	complete(&sbi->s_timeout_kobj_unregister);
}

static struct kobj_type sbi_timeout_ktype = {
	.sysfs_ops = &sbi_cmd_sysfs_ops,
	.default_attrs = sbi_timeout_attrs,
	.release = sbi_timeout_release,
};

void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_put(&sbi->s_cmd_timeout_kobj);
	wait_for_completion(&sbi->s_timeout_kobj_unregister);
	kobject_put(&sbi->kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
}

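/*
 * Create the per-superblock entry under the "hmdfs" kset together with the
 * nested "cmd_timeout" kobject; registration fails with -EEXIST if an entry
 * with the same name already exists.
 */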
int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi)
{
	int ret;
	struct kobject *kobj = NULL;

	mutex_lock(&hmdfs_sysfs_mutex);
	kobj = kset_find_obj(hmdfs_kset, name);
	if (kobj) {
		hmdfs_err("mount failed, already exists");
		kobject_put(kobj);
		mutex_unlock(&hmdfs_sysfs_mutex);
		return -EEXIST;
	}

	sbi->kobj.kset = hmdfs_kset;
	init_completion(&sbi->s_kobj_unregister);
	ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype,
				   &hmdfs_kset->kobj, "%s", name);
	sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000), KGIDT_INIT(1000));
	mutex_unlock(&hmdfs_sysfs_mutex);

	if (ret) {
		kobject_put(&sbi->kobj);
		wait_for_completion(&sbi->s_kobj_unregister);
		return ret;
	}

	init_completion(&sbi->s_timeout_kobj_unregister);
	ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype,
				   &sbi->kobj, "cmd_timeout");
	if (ret) {
		hmdfs_release_sysfs(sbi);
		return ret;
	}

	kobject_uevent(&sbi->kobj, KOBJ_ADD);
	return 0;
}

void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_del(&sbi->s_cmd_timeout_kobj);
	kobject_del(&sbi->kobj);
}

static inline int to_sysfs_fmt_evt(unsigned int evt)
{
	return evt == RAW_NODE_EVT_NR ? -1 : evt;
}

static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr,
			     char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return fill_features(buf, peer->features);
}

static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr,
			  char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_async evt %d seq %u\n"
			"cur_sync evt %d seq %u\n"
			"pending evt %d seq %u\n"
			"merged evt %u\n"
			"dup_drop evt %u %u\n"
			"waiting evt %u %u\n"
			"seq_tbl %u %u %u %u\n"
			"seq_rd_idx %u\n"
			"seq_wr_idx %u\n",
			to_sysfs_fmt_evt(peer->cur_evt[0]),
			peer->cur_evt_seq[0],
			to_sysfs_fmt_evt(peer->cur_evt[1]),
			peer->cur_evt_seq[1],
			to_sysfs_fmt_evt(peer->pending_evt),
			peer->pending_evt_seq,
			peer->merged_evt,
			peer->dup_evt[RAW_NODE_EVT_OFF],
			peer->dup_evt[RAW_NODE_EVT_ON],
			peer->waiting_evt[RAW_NODE_EVT_OFF],
			peer->waiting_evt[RAW_NODE_EVT_ON],
			peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2],
			peer->seq_tbl[3],
			peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR,
			peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR);
}

static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr,
			  char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_nothing %u\n"
			"cur_fail %u\n"
			"total_ok %u\n"
			"total_nothing %u\n"
			"total_fail %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.stash.cur_ok,
			peer->stats.stash.cur_nothing,
			peer->stats.stash.cur_fail,
			peer->stats.stash.total_ok,
			peer->stats.stash.total_nothing,
			peer->stats.stash.total_fail,
			peer->stats.stash.ok_pages,
			peer->stats.stash.fail_pages);
}

static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr,
			    char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_keep %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_keep %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.restore.cur_ok,
			peer->stats.restore.cur_fail,
			peer->stats.restore.cur_keep,
			peer->stats.restore.total_ok,
			peer->stats.restore.total_fail,
			peer->stats.restore.total_keep,
			peer->stats.restore.ok_pages,
			peer->stats.restore.fail_pages);
}

static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr,
			    char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_invalid %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_invalid %u\n"
			"time %u\n",
			peer->stats.rebuild.cur_ok,
			peer->stats.rebuild.cur_fail,
			peer->stats.rebuild.cur_invalid,
			peer->stats.rebuild.total_ok,
			peer->stats.rebuild.total_fail,
			peer->stats.rebuild.total_invalid,
			peer->stats.rebuild.time);
}

static struct peer_attribute peer_features_attr = __ATTR_RO(features);
static struct peer_attribute peer_event_attr = __ATTR_RO(event);
static struct peer_attribute peer_stash_attr = __ATTR_RO(stash);
static struct peer_attribute peer_restore_attr = __ATTR_RO(restore);
static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild);

static struct attribute *peer_attrs[] = {
	&peer_features_attr.attr,
	&peer_event_attr.attr,
	&peer_stash_attr.attr,
	&peer_restore_attr.attr,
	&peer_rebuild_attr.attr,
	NULL,
};

static ssize_t peer_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->show)
		return -EIO;
	return peer_attr->show(kobj, peer_attr, buf);
}

static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->store)
		return -EIO;
	return peer_attr->store(kobj, peer_attr, buf, len);
}

static const struct sysfs_ops peer_sysfs_ops = {
	.show = peer_attr_show,
	.store = peer_attr_store,
};

static void peer_sysfs_release(struct kobject *kobj)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	complete(&peer->kobj_unregister);
}

static struct kobj_type peer_ktype = {
	.sysfs_ops = &peer_sysfs_ops,
	.default_attrs = peer_attrs,
	.release = peer_sysfs_release,
};

int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi,
			      struct hmdfs_peer *peer)
{
	int err = 0;

	init_completion(&peer->kobj_unregister);
	err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj,
				   "peer_%llu", peer->device_id);
	return err;
}

void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer)
{
	kobject_del(&peer->kobj);
	kobject_put(&peer->kobj);
	wait_for_completion(&peer->kobj_unregister);
}

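/*
 * Queue @param into the notify fifo and poke the "cmd" attribute via
 * sysfs_notify() so that userspace can read the event back through
 * sbi_cmd_show().
 */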
void notify(struct hmdfs_peer *node, struct notify_param *param)
{
	struct hmdfs_sb_info *sbi = node->sbi;
	int in_len;

	if (!param)
		return;
	spin_lock(&sbi->notify_fifo_lock);
	in_len =
		kfifo_in(&sbi->notify_fifo, param, sizeof(struct notify_param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (in_len != sizeof(struct notify_param))
		return;
	sysfs_notify(&sbi->kobj, NULL, "cmd");
}

int hmdfs_sysfs_init(void)
{
	hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj);
	if (!hmdfs_kset)
		return -ENOMEM;

	return 0;
}

void hmdfs_sysfs_exit(void)
{
	kset_unregister(hmdfs_kset);
	hmdfs_kset = NULL;
}
