// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/comm/connection.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "connection.h"

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include "device_node.h"
#include "hmdfs.h"
#include "message_verify.h"
#include "node_cb.h"
#include "protocol.h"
#include "socket_adapter.h"

#ifdef CONFIG_HMDFS_FS_ENCRYPTION
#include "crypto.h"
#endif

#define HMDFS_WAIT_REQUEST_END_MIN 20
#define HMDFS_WAIT_REQUEST_END_MAX 30

#define HMDFS_WAIT_CONN_RELEASE (3 * HZ)

#define HMDFS_RETRY_WB_WQ_MAX_ACTIVE 16

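/*
 * The handshake crypto body carries a __le32 bitmask of supported
 * algorithms; only KTLS AES-128 is advertised for now.
 */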
static void hs_fill_crypto_data(struct connection *conn_impl, __u8 ops,
				void *data, __u32 len)
{
	struct crypto_body *body = NULL;

	if (len < sizeof(struct crypto_body)) {
		hmdfs_info("crypto body len %u is invalid", len);
		return;
	}
	body = (struct crypto_body *)data;

	/* Test-only for now; the negotiated algorithm still needs to be
	 * filled in properly.
	 */
	body->crypto |= HMDFS_HS_CRYPTO_KTLS_AES128;
	body->crypto = cpu_to_le32(body->crypto);

	hmdfs_info("fill crypto. crypto=0x%08x", body->crypto);
}

static int hs_parse_crypto_data(struct connection *conn_impl, __u8 ops,
				void *data, __u32 len)
{
	struct crypto_body *hs_crypto = NULL;
	uint32_t crypto;

	if (len < sizeof(struct crypto_body)) {
		hmdfs_info("handshake msg len error, len=%u", len);
		return -1;
	}
	hs_crypto = (struct crypto_body *)data;
	/* the crypto field is a __le32, see hs_fill_crypto_data() */
	crypto = le32_to_cpu(hs_crypto->crypto);
	conn_impl->crypto = crypto;
	hmdfs_info("ops=%u, len=%u, crypto=0x%08x", ops, len, crypto);
	return 0;
}

static void hs_fill_case_sense_data(struct connection *conn_impl, __u8 ops,
				    void *data, __u32 len)
{
	struct case_sense_body *body = (struct case_sense_body *)data;

	if (len < sizeof(struct case_sense_body)) {
		hmdfs_err("case sensitive len %u is invalid", len);
		return;
	}
	body->case_sensitive = conn_impl->node->sbi->s_case_sensitive;
}

static int hs_parse_case_sense_data(struct connection *conn_impl, __u8 ops,
				    void *data, __u32 len)
{
	struct case_sense_body *body = (struct case_sense_body *)data;
	__u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0;

	if (len < sizeof(struct case_sense_body)) {
		hmdfs_info("case sensitive len %u is invalid", len);
		return -1;
	}
	if (body->case_sensitive != sensitive) {
		hmdfs_err("case sensitive inconsistent, server: %u, client: %u, ops: %u",
			  body->case_sensitive, sensitive, ops);
		return -1;
	}
	return 0;
}

static void hs_fill_feature_data(struct connection *conn_impl, __u8 ops,
				 void *data, __u32 len)
{
	struct feature_body *body = (struct feature_body *)data;

	if (len < sizeof(struct feature_body)) {
		hmdfs_err("feature len %u is invalid", len);
		return;
	}
	body->features = cpu_to_le64(conn_impl->node->sbi->s_features);
	body->reserved = cpu_to_le64(0);
}

static int hs_parse_feature_data(struct connection *conn_impl, __u8 ops,
				 void *data, __u32 len)
{
	struct feature_body *body = (struct feature_body *)data;

	if (len < sizeof(struct feature_body)) {
		hmdfs_err("feature len %u is invalid", len);
		return -1;
	}

	conn_impl->node->features = le64_to_cpu(body->features);
	return 0;
}

/* Each body len must stay below 0xffff: it is carried in a __le16 field. */
static const struct conn_hs_extend_reg s_hs_extend_reg[HS_EXTEND_CODE_COUNT] = {
	[HS_EXTEND_CODE_CRYPTO] = {
		.len = sizeof(struct crypto_body),
		.resv = 0,
		.filler = hs_fill_crypto_data,
		.parser = hs_parse_crypto_data,
	},
	[HS_EXTEND_CODE_CASE_SENSE] = {
		.len = sizeof(struct case_sense_body),
		.resv = 0,
		.filler = hs_fill_case_sense_data,
		.parser = hs_parse_case_sense_data,
	},
	[HS_EXTEND_CODE_FEATURE_SUPPORT] = {
		.len = sizeof(struct feature_body),
		.resv = 0,
		.filler = hs_fill_feature_data,
		.parser = hs_parse_feature_data,
	},
};

static __u32 hs_get_extend_data_len(void)
{
	__u32 len;
	int i;

	len = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) {
		len += sizeof(struct extend_field_head);
		len += s_hs_extend_reg[i].len;
	}

	hmdfs_info("extend data total len is %u", len);
	return len;
}

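/*
 * Wire layout of the handshake extend data, as produced and consumed by
 * the two helpers below:
 *
 *   struct conn_hs_extend_head (le32 field_cn)
 *   field_cn entries of:
 *     struct extend_field_head (le16 code, le16 len)
 *     len bytes of body, handled by s_hs_extend_reg[code]
 */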
static void hs_fill_extend_data(struct connection *conn_impl, __u8 ops,
				void *extend_data, __u32 len)
{
	struct conn_hs_extend_head *extend_head = NULL;
	struct extend_field_head *field = NULL;
	uint8_t *body = NULL;
	__u32 offset;
	__u16 i;

	if (sizeof(struct conn_hs_extend_head) > len) {
		hmdfs_info("len error. len=%u", len);
		return;
	}
	extend_head = (struct conn_hs_extend_head *)extend_data;
	extend_head->field_cn = 0;
	offset = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) {
		if (sizeof(struct extend_field_head) > (len - offset))
			break;
		field = (struct extend_field_head *)((uint8_t *)extend_data +
						     offset);
		offset += sizeof(struct extend_field_head);

		if (s_hs_extend_reg[i].len > (len - offset))
			break;
		body = (uint8_t *)extend_data + offset;
		offset += s_hs_extend_reg[i].len;

		field->code = cpu_to_le16(i);
		field->len = cpu_to_le16(s_hs_extend_reg[i].len);

		if (s_hs_extend_reg[i].filler)
			s_hs_extend_reg[i].filler(conn_impl, ops,
						  body, s_hs_extend_reg[i].len);

		extend_head->field_cn += 1;
	}

	extend_head->field_cn = cpu_to_le32(extend_head->field_cn);
}

static int hs_parse_extend_data(struct connection *conn_impl, __u8 ops,
				void *extend_data, __u32 extend_len)
{
	struct conn_hs_extend_head *extend_head = NULL;
	struct extend_field_head *field = NULL;
	uint8_t *body = NULL;
	__u32 offset;
	__u32 field_cnt;
	__u16 code;
	__u16 len;
	int i;
	int ret;

	if (sizeof(struct conn_hs_extend_head) > extend_len) {
		hmdfs_err("ops=%u, extend_len=%u", ops, extend_len);
		return -1;
	}
	extend_head = (struct conn_hs_extend_head *)extend_data;
	field_cnt = le32_to_cpu(extend_head->field_cn);
	hmdfs_info("extend_len=%u, field_cnt=%u", extend_len, field_cnt);

	offset = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < field_cnt; i++) {
		if (sizeof(struct extend_field_head) > (extend_len - offset)) {
			hmdfs_err("cnt err, op=%u, extend_len=%u, cnt=%u, i=%u",
				  ops, extend_len, field_cnt, i);
			return -1;
		}
		field = (struct extend_field_head *)((uint8_t *)extend_data +
						     offset);
		offset += sizeof(struct extend_field_head);
		code = le16_to_cpu(field->code);
		len = le16_to_cpu(field->len);
		if (len > (extend_len - offset)) {
			hmdfs_err("len err, op=%u, extend_len=%u, cnt=%u, i=%u",
				  ops, extend_len, field_cnt, i);
			hmdfs_err("len err, code=%u, len=%u, offset=%u", code,
				  len, offset);
			return -1;
		}

		body = (uint8_t *)extend_data + offset;
		offset += len;
		if ((code < HS_EXTEND_CODE_COUNT) &&
		    (s_hs_extend_reg[code].parser)) {
			ret = s_hs_extend_reg[code].parser(conn_impl, ops,
							   body, len);
			if (ret)
				return ret;
		}
	}
	return 0;
}

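/*
 * A handshake payload is a struct connection_handshake_req (le32 len plus
 * a device-id string of that length) followed by the extend data parsed
 * above. Unknown extend codes are skipped, which keeps older peers
 * compatible with newer fields.
 */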
static int hs_proc_msg_data(struct connection *conn_impl, __u8 ops, void *data,
			    __u32 data_len)
{
	struct connection_handshake_req *hs_req = NULL;
	uint8_t *extend_data = NULL;
	__u32 extend_len;
	__u32 req_len;
	int ret;

	if (!data) {
		hmdfs_err("err, msg data is null");
		return -1;
	}

	if (data_len < sizeof(struct connection_handshake_req)) {
		hmdfs_err("ack msg data len error. data_len=%u, device_id=%llu",
			  data_len, conn_impl->node->device_id);
		return -1;
	}

	hs_req = (struct connection_handshake_req *)data;
	req_len = le32_to_cpu(hs_req->len);
	if (req_len > (data_len - sizeof(struct connection_handshake_req))) {
		hmdfs_info("ack msg hs_req len(%u) error. data_len=%u, device_id=%llu",
			   req_len, data_len, conn_impl->node->device_id);
		return -1;
	}
	extend_len =
		data_len - sizeof(struct connection_handshake_req) - req_len;
	extend_data = (uint8_t *)data +
		      sizeof(struct connection_handshake_req) + req_len;
	ret = hs_parse_extend_data(conn_impl, ops, extend_data, extend_len);
	if (!ret)
		hmdfs_info("hs msg rcv, ops=%u, data_len=%u, device_id=%llu, req_len=%u",
			   ops, data_len, conn_impl->node->device_id, req_len);
	return ret;
}
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
static int connection_handshake_init_tls(struct connection *conn_impl, __u8 ops)
{
	/*
	 * Init the ktls config, using key1/key2 as the initial write keys
	 * for the two directions.
	 */
	__u8 key1[HMDFS_KEY_SIZE];
	__u8 key2[HMDFS_KEY_SIZE];
	int ret;

	if ((ops != CONNECT_MESG_HANDSHAKE_RESPONSE) &&
	    (ops != CONNECT_MESG_HANDSHAKE_ACK)) {
		hmdfs_err("ops %u is invalid", ops);
		return -EINVAL;
	}

	update_key(conn_impl->master_key, key1, HKDF_TYPE_KEY_INITIATOR);
	update_key(conn_impl->master_key, key2, HKDF_TYPE_KEY_ACCEPTER);

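	/*
	 * key1 is derived with HKDF_TYPE_KEY_INITIATOR, key2 with
	 * HKDF_TYPE_KEY_ACCEPTER. The side handling the ACK sends with
	 * key1 and receives with key2; the side handling the RESPONSE
	 * does the opposite, so each direction is keyed consistently
	 * on both peers.
	 */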
	if (ops == CONNECT_MESG_HANDSHAKE_ACK) {
		memcpy(conn_impl->send_key, key1, HMDFS_KEY_SIZE);
		memcpy(conn_impl->recv_key, key2, HMDFS_KEY_SIZE);
	} else {
		memcpy(conn_impl->send_key, key2, HMDFS_KEY_SIZE);
		memcpy(conn_impl->recv_key, key1, HMDFS_KEY_SIZE);
	}

	memset(key1, 0, HMDFS_KEY_SIZE);
	memset(key2, 0, HMDFS_KEY_SIZE);

	hmdfs_info("hs: ops=%u start set crypto tls", ops);
	ret = tls_crypto_info_init(conn_impl);
	if (ret)
		hmdfs_err("setting tls fail. ops is %u", ops);

	return ret;
}
#endif

static int do_send_handshake(struct connection *conn_impl, __u8 ops,
			     __le16 request_id)
{
	int err;
	struct connection_msg_head *hs_head = NULL;
	struct connection_handshake_req *hs_data = NULL;
	uint8_t *hs_extend_data = NULL;
	struct hmdfs_send_data msg;
	__u32 send_len;
	__u32 len;
	__u32 extend_len = 0;
	char buf[HMDFS_CID_SIZE] = { 0 };

	len = scnprintf(buf, HMDFS_CID_SIZE, "%llu", 0ULL);
	send_len = sizeof(struct connection_msg_head) +
		   sizeof(struct connection_handshake_req) + len;

	if (((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) ||
	     (ops == CONNECT_MESG_HANDSHAKE_ACK)) &&
	    (conn_impl->node->version >= DFS_2_0)) {
		extend_len = hs_get_extend_data_len();
		send_len += extend_len;
	}

	hs_head = kzalloc(send_len, GFP_KERNEL);
	if (!hs_head)
		return -ENOMEM;

	hs_data = (struct connection_handshake_req *)
		  ((uint8_t *)hs_head + sizeof(struct connection_msg_head));

	hs_data->len = cpu_to_le32(len);
	memcpy(hs_data->dev_id, buf, len);

	if (((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) ||
	     (ops == CONNECT_MESG_HANDSHAKE_ACK)) &&
	    (conn_impl->node->version >= DFS_2_0)) {
		hs_extend_data = (uint8_t *)hs_data +
				 sizeof(struct connection_handshake_req) + len;
		hs_fill_extend_data(conn_impl, ops, hs_extend_data, extend_len);
	}

	hs_head->magic = HMDFS_MSG_MAGIC;
	hs_head->version = DFS_2_0;
	hs_head->flags |= 0x1;
	hmdfs_info("Send handshake message: ops = %d, fd = %d", ops,
		   ((struct tcp_handle *)(conn_impl->connect_handle))->fd);
	hs_head->operations = ops;
	hs_head->request_id = request_id;
	hs_head->datasize = cpu_to_le32(send_len);
	hs_head->source = 0;
	hs_head->msg_id = 0;

	msg.head = hs_head;
	msg.head_len = sizeof(struct connection_msg_head);
	msg.data = hs_data;
	msg.len = send_len - msg.head_len;
	msg.sdesc = NULL;
	msg.sdesc_len = 0;
	err = conn_impl->send_message(conn_impl, &msg);
	kfree(hs_head);
	return err;
}

static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node)
{
	int sum = 0;
	int i;

	for (i = 0; i < RAW_NODE_EVT_NR; i++)
		sum += node->waiting_evt[i];

	return sum;
}

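/*
 * Raw events strictly alternate in the waiting queue, so the most
 * recently queued event can be derived from the pending event plus the
 * parity of the queue length (this assumes RAW_NODE_EVT_NR is 2:
 * RAW_NODE_EVT_ON and RAW_NODE_EVT_OFF).
 */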
static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt,
					 unsigned int *seq)
{
	int last;
	int sum;
	unsigned int next;

	sum = hmdfs_node_waiting_evt_sum(node);
	if (sum % RAW_NODE_EVT_NR)
		last = !node->pending_evt;
	else
		last = node->pending_evt;

	/* duplicated event */
	if (evt == last) {
		node->dup_evt[evt]++;
		return 0;
	}

	node->waiting_evt[evt]++;
	hmdfs_debug("add node->waiting_evt[%d]=%d", evt,
		    node->waiting_evt[evt]);

	/* offline wait + online wait + offline wait = offline wait
	 * online wait + offline wait + online wait != online wait
	 * because the resources tied to the first online (e.g. fd) must
	 * have been invalidated by the following offline.
	 */
	if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 &&
	    node->waiting_evt[RAW_NODE_EVT_ON] >= 1) {
		node->waiting_evt[RAW_NODE_EVT_OFF] -= 1;
		node->waiting_evt[RAW_NODE_EVT_ON] -= 1;
		node->seq_wr_idx -= 2;
		node->merged_evt += 2;
	}

	next = hmdfs_node_inc_evt_seq(node);
	node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next;
	*seq = next;

	return 1;
}

static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt,
				       bool sync, unsigned int seq)
{
	int evt = (raw_evt == RAW_NODE_EVT_OFF) ? NODE_EVT_OFFLINE :
						  NODE_EVT_ONLINE;
	int cur_evt_idx = sync ? 1 : 0;

	node->cur_evt[cur_evt_idx] = raw_evt;
	node->cur_evt_seq[cur_evt_idx] = seq;
	hmdfs_node_call_evt_cb(node, evt, sync, seq);
	node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR;
}

static void hmdfs_node_evt_work(struct work_struct *work)
{
	struct hmdfs_peer *node =
		container_of(work, struct hmdfs_peer, evt_dwork.work);
	unsigned int seq;

	/*
	 * The N-th sync cb completes before the N-th async cb,
	 * so seq_lock is used as a barrier in the read & write paths
	 * to ensure we can read the required seq.
	 */
	mutex_lock(&node->seq_lock);
	seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR];
	hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq);
	mutex_unlock(&node->seq_lock);

	mutex_lock(&node->evt_lock);
	if (hmdfs_node_waiting_evt_sum(node)) {
		node->pending_evt = !node->pending_evt;
		node->pending_evt_seq =
			node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR];
		node->waiting_evt[node->pending_evt]--;
		/* sync cb has been done */
		schedule_delayed_work(&node->evt_dwork,
				      node->sbi->async_cb_delay * HZ);
	} else {
		node->last_evt = node->pending_evt;
		node->pending_evt = RAW_NODE_EVT_NR;
	}
	mutex_unlock(&node->evt_lock);
}

/*
 * The callbacks run in the following order:
 *
 * (1) sync callbacks are invoked in the queueing order of the raw events:
 *     ensured by seq_lock.
 * (2) async callbacks are invoked in the queueing order of the raw events:
 *     ensured by evt_lock & evt_dwork.
 * (3) the async callback of a raw event runs after its sync callback:
 *     ensured by seq_lock.
 * (4) the async callback of the N-th raw event and the sync callback of
 *     the (N+x)-th raw event may run concurrently.
 */
static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt)
{
	unsigned int seq = 0;

	mutex_lock(&node->evt_lock);
	if (node->pending_evt == RAW_NODE_EVT_NR) {
		if (evt == node->last_evt) {
			node->dup_evt[evt]++;
			mutex_unlock(&node->evt_lock);
			return;
		}
		node->pending_evt = evt;
		seq = hmdfs_node_inc_evt_seq(node);
		node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq;
		node->pending_evt_seq = seq;
		mutex_lock(&node->seq_lock);
		mutex_unlock(&node->evt_lock);
		/* call the sync cb, then the async cb */
		hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
		mutex_unlock(&node->seq_lock);
		schedule_delayed_work(&node->evt_dwork,
				      node->sbi->async_cb_delay * HZ);
	} else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) {
		/*
		 * Take seq_lock first to ensure the N-th sync cb
		 * is called before the N-th async cb.
		 */
		mutex_lock(&node->seq_lock);
		mutex_unlock(&node->evt_lock);
		hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
		mutex_unlock(&node->seq_lock);
	} else {
		mutex_unlock(&node->evt_lock);
	}
}

void connection_send_handshake(struct connection *conn_impl, __u8 ops,
			       __le16 request_id)
{
	struct tcp_handle *tcp = NULL;
	int err = do_send_handshake(conn_impl, ops, request_id);

	if (likely(err >= 0))
		return;

	tcp = conn_impl->connect_handle;
	hmdfs_err("Failed to send handshake: err = %d, fd = %d", err, tcp->fd);
	hmdfs_reget_connection(conn_impl);
}

void connection_handshake_notify(struct hmdfs_peer *node, int notify_type)
{
	struct notify_param param;

	param.notify = notify_type;
	param.fd = INVALID_SOCKET_FD;
	memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE);
	notify(node, &param);
}

void peer_online(struct hmdfs_peer *peer)
{
	// Check whether someone else has already made the peer online
	u8 prev_stat = xchg(&peer->status, NODE_STAT_ONLINE);
	unsigned long jif_tmp = jiffies;

	if (prev_stat == NODE_STAT_ONLINE)
		return;
	WRITE_ONCE(peer->conn_time, jif_tmp);
	WRITE_ONCE(peer->sbi->connections.recent_ol, jif_tmp);
	hmdfs_queue_raw_node_evt(peer, RAW_NODE_EVT_ON);
}

void connection_to_working(struct hmdfs_peer *node)
{
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	if (!node)
		return;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == CONNECT_TYPE_TCP &&
		    conn_impl->status == CONNECT_STAT_WAIT_RESPONSE) {
			tcp = conn_impl->connect_handle;
			hmdfs_info("fd %d to working", tcp->fd);
			conn_impl->status = CONNECT_STAT_WORKING;
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
	peer_online(node);
}

static int connection_check_version(__u8 version)
{
	__u8 min_ver = USERSPACE_MAX_VER;

	if (version <= min_ver || version >= MAX_VERSION) {
		hmdfs_info("version err. version %u", version);
		return -1;
	}
	return 0;
}

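/*
 * Three-way handshake as handled below:
 *
 *   initiator                      accepter
 *      REQUEST   ----------------->
 *                <----------------- RESPONSE (accepter: WAIT_ACK on DFS_2_0+)
 *      ACK       ----------------->
 *
 * On DFS_2_0+ both sides negotiate extend data and then switch the
 * connection to ktls; older peers go straight to WORKING.
 */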
void connection_handshake_recv_handler(struct connection *conn_impl, void *buf,
				       void *data, __u32 data_len)
{
	__u8 version;
	__u8 ops;
	__u8 status;
	int fd = ((struct tcp_handle *)(conn_impl->connect_handle))->fd;
	struct connection_msg_head *head = (struct connection_msg_head *)buf;
	int ret;

	version = head->version;
	conn_impl->node->version = version;
	if (connection_check_version(version) != 0)
		goto out;
	conn_impl->node->conn_operations = hmdfs_get_peer_operation(version);
	ops = head->operations;
	status = conn_impl->status;
	switch (ops) {
	case CONNECT_MESG_HANDSHAKE_REQUEST:
		hmdfs_info("Received handshake request: device_id = %llu, version = %d, head->datasize = %d, tcp->fd = %d",
			   conn_impl->node->device_id, version, head->datasize, fd);
		connection_send_handshake(conn_impl,
					  CONNECT_MESG_HANDSHAKE_RESPONSE,
					  head->msg_id);
		if (conn_impl->node->version >= DFS_2_0) {
			conn_impl->status = CONNECT_STAT_WAIT_ACK;
			conn_impl->node->status = NODE_STAT_SHAKING;
		} else {
			conn_impl->status = CONNECT_STAT_WORKING;
		}
		break;
	case CONNECT_MESG_HANDSHAKE_RESPONSE:
		hmdfs_info("Received handshake response: device_id = %llu, cmd->status = %hhu, tcp->fd = %d",
			   conn_impl->node->device_id, status, fd);
		if (status == CONNECT_STAT_WAIT_REQUEST) {
			// must be a 10.1 device, no need to set up ktls
			connection_to_working(conn_impl->node);
			goto out;
		}

		if (conn_impl->node->version >= DFS_2_0) {
			ret = hs_proc_msg_data(conn_impl, ops, data, data_len);
			if (ret)
				goto nego_err;
			connection_send_handshake(conn_impl,
						  CONNECT_MESG_HANDSHAKE_ACK,
						  head->msg_id);
			hmdfs_info("response rcv handled, conn_impl->crypto=0x%08x",
				   conn_impl->crypto);
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
			ret = connection_handshake_init_tls(conn_impl, ops);
			if (ret) {
				hmdfs_err("init_tls_key fail, ops %u", ops);
				goto out;
			}
#endif
		}

		conn_impl->status = CONNECT_STAT_WORKING;
		peer_online(conn_impl->node);
		break;
	case CONNECT_MESG_HANDSHAKE_ACK:
		if (conn_impl->node->version >= DFS_2_0) {
			ret = hs_proc_msg_data(conn_impl, ops, data, data_len);
			if (ret)
				goto nego_err;
			hmdfs_info("ack rcv handled, conn_impl->crypto=0x%08x",
				   conn_impl->crypto);
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
			ret = connection_handshake_init_tls(conn_impl, ops);
			if (ret) {
				hmdfs_err("init_tls_key fail, ops %u", ops);
				goto out;
			}
#endif
			conn_impl->status = CONNECT_STAT_WORKING;
			peer_online(conn_impl->node);
			break;
		}
		fallthrough;
	default:
		break;
	}
out:
	kfree(data);
	return;
nego_err:
	conn_impl->status = CONNECT_STAT_NEGO_FAIL;
	connection_handshake_notify(conn_impl->node, NOTIFY_OFFLINE);
	hmdfs_err("protocol negotiation failed, remote device_id = %llu, tcp->fd = %d",
		  conn_impl->node->device_id, fd);
	goto out;
}

#ifdef CONFIG_HMDFS_FS_ENCRYPTION
static void update_tls_crypto_key(struct connection *conn,
				  struct hmdfs_head_cmd *head, void *data,
				  __u32 data_len)
{
	// rekey message handler
	struct connection_rekey_request *rekey_req = NULL;
	int ret = 0;

	if (hmdfs_message_verify(conn->node, head, data) < 0) {
		hmdfs_err("Rekey msg %d has been abandoned", head->msg_id);
		goto out_err;
	}

	hmdfs_info("recv REKEY request");
	set_crypto_info(conn, SET_CRYPTO_RECV);
	// update send key if requested
	rekey_req = data;
	if (le32_to_cpu(rekey_req->update_request) == UPDATE_REQUESTED) {
		ret = tcp_send_rekey_request(conn);
		if (ret == 0)
			set_crypto_info(conn, SET_CRYPTO_SEND);
	}
out_err:
	kfree(data);
}

static bool cmd_update_tls_crypto_key(struct connection *conn,
				      struct hmdfs_head_cmd *head)
{
	__u8 version = conn->node->version;
	struct tcp_handle *tcp = conn->connect_handle;

	if (version < DFS_2_0 || conn->type != CONNECT_TYPE_TCP || !tcp)
		return false;
	return head->operations.command == F_CONNECT_REKEY;
}
#endif

void connection_working_recv_handler(struct connection *conn_impl, void *buf,
				     void *data, __u32 data_len)
{
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
	if (cmd_update_tls_crypto_key(conn_impl, buf)) {
		update_tls_crypto_key(conn_impl, buf, data, data_len);
		return;
	}
#endif
	conn_impl->node->conn_operations->recvmsg(conn_impl->node, buf, data);
}

static void connection_release(struct kref *ref)
{
	struct tcp_handle *tcp = NULL;
	struct connection *conn = container_of(ref, struct connection, ref_cnt);

	hmdfs_info("connection release");
	memset(conn->master_key, 0, HMDFS_KEY_SIZE);
	memset(conn->send_key, 0, HMDFS_KEY_SIZE);
	memset(conn->recv_key, 0, HMDFS_KEY_SIZE);
	if (conn->close)
		conn->close(conn);
	tcp = conn->connect_handle;
	crypto_free_aead(conn->tfm);
	// need to check and test: fput(tcp->sock->file);
	if (tcp && tcp->sock) {
		hmdfs_info("connection release: fd = %d, refcount %ld", tcp->fd,
			   file_count(tcp->sock->file));
		sockfd_put(tcp->sock);
	}
	if (tcp && tcp->recv_cache)
		kmem_cache_destroy(tcp->recv_cache);

	if (!list_empty(&conn->list)) {
		mutex_lock(&conn->node->conn_impl_list_lock);
		list_del(&conn->list);
		mutex_unlock(&conn->node->conn_impl_list_lock);
		/*
		 * wake up hmdfs_disconnect_node to check whether
		 * conn_deleting_list is empty.
		 */
		wake_up_interruptible(&conn->node->deleting_list_wq);
	}

	kfree(tcp);
	kfree(conn);
}

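/*
 * Called via kref_put_mutex() with connections.node_lock already held;
 * the lock is released at the end, after the peer is unlinked.
 */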
static void hmdfs_peer_release(struct kref *ref)
{
	struct hmdfs_peer *peer = container_of(ref, struct hmdfs_peer, ref_cnt);
	struct mutex *lock = &peer->sbi->connections.node_lock;

	if (!list_empty(&peer->list))
		hmdfs_info("releasing an on-sbi peer: device_id %llu",
			   peer->device_id);
	else
		hmdfs_info("releasing a redundant peer: device_id %llu",
			   peer->device_id);

	cancel_delayed_work_sync(&peer->evt_dwork);
	list_del(&peer->list);
	idr_destroy(&peer->msg_idr);
	idr_destroy(&peer->file_id_idr);
	flush_workqueue(peer->req_handle_wq);
	flush_workqueue(peer->async_wq);
	flush_workqueue(peer->retry_wb_wq);
	destroy_workqueue(peer->dentry_wq);
	destroy_workqueue(peer->req_handle_wq);
	destroy_workqueue(peer->async_wq);
	destroy_workqueue(peer->retry_wb_wq);
	destroy_workqueue(peer->reget_conn_wq);
	kfree(peer);
	mutex_unlock(lock);
}

void connection_put(struct connection *conn)
{
	struct mutex *lock = &conn->ref_lock;

	kref_put_mutex(&conn->ref_cnt, connection_release, lock);
}

void peer_put(struct hmdfs_peer *peer)
{
	struct mutex *lock = &peer->sbi->connections.node_lock;

	kref_put_mutex(&peer->ref_cnt, hmdfs_peer_release, lock);
}

static void hmdfs_dump_deleting_list(struct hmdfs_peer *node)
{
	struct connection *con = NULL;
	struct tcp_handle *tcp = NULL;
	int count = 0;

	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(con, &node->conn_deleting_list, list) {
		tcp = con->connect_handle;
		hmdfs_info("deleting list %d: device_id %llu tcp_fd %d refcnt %d",
			   count, node->device_id, tcp ? tcp->fd : -1,
			   kref_read(&con->ref_cnt));
		count++;
	}
	mutex_unlock(&node->conn_impl_list_lock);
}

static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node)
{
	bool empty = false;

	mutex_lock(&node->conn_impl_list_lock);
	empty = list_empty(&node->conn_deleting_list);
	mutex_unlock(&node->conn_impl_list_lock);

	return empty;
}

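/*
 * Teardown sequence: mark the node offline, move all connections off the
 * node and shut their sockets down, wait for connections parked on
 * conn_deleting_list to be released, wait for in-flight message ids to
 * drain, and finally queue the OFF event for the callbacks.
 */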
void hmdfs_disconnect_node(struct hmdfs_peer *node)
{
	LIST_HEAD(local_conns);
	struct connection *conn_impl = NULL;
	struct connection *next = NULL;
	struct tcp_handle *tcp = NULL;

	if (unlikely(!node))
		return;

	hmdfs_node_inc_evt_seq(node);
	/* Refer to comments in hmdfs_is_node_offlined() */
	smp_mb__after_atomic();
	node->status = NODE_STAT_OFFLINE;
	hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id);

	mutex_lock(&node->conn_impl_list_lock);
	if (!list_empty(&node->conn_impl_list))
		list_replace_init(&node->conn_impl_list, &local_conns);
	mutex_unlock(&node->conn_impl_list_lock);

	list_for_each_entry_safe(conn_impl, next, &local_conns, list) {
		tcp = conn_impl->connect_handle;
		if (tcp && tcp->sock) {
			kernel_sock_shutdown(tcp->sock, SHUT_RDWR);
			hmdfs_info("shutdown sock: fd = %d, refcount %ld",
				   tcp->fd, file_count(tcp->sock->file));
		}
		if (tcp)
			tcp->fd = INVALID_SOCKET_FD;

		tcp_close_socket(tcp);
		list_del_init(&conn_impl->list);

		connection_put(conn_impl);
	}

	if (wait_event_interruptible_timeout(node->deleting_list_wq,
					     hmdfs_conn_deleting_list_empty(node),
					     HMDFS_WAIT_CONN_RELEASE) <= 0)
		hmdfs_dump_deleting_list(node);

	/* wait for all request processing to end */
	spin_lock(&node->idr_lock);
	while (node->msg_idr_process) {
		spin_unlock(&node->idr_lock);
		usleep_range(HMDFS_WAIT_REQUEST_END_MIN,
			     HMDFS_WAIT_REQUEST_END_MAX);
		spin_lock(&node->idr_lock);
	}
	spin_unlock(&node->idr_lock);

	hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF);
}

static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt)
{
	unsigned int seq = hmdfs_node_inc_evt_seq(node);

	mutex_lock(&node->seq_lock);
	hmdfs_node_call_evt_cb(node, evt, true, seq);
	mutex_unlock(&node->seq_lock);
}

static void hmdfs_del_peer(struct hmdfs_peer *node)
{
	/*
	 * No need for an offline evt cb, because all files must
	 * have been flushed and closed by now; otherwise the
	 * filesystem would be unmountable.
	 */
	cancel_delayed_work_sync(&node->evt_dwork);

	hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL);

	hmdfs_release_peer_sysfs(node);

	flush_workqueue(node->reget_conn_wq);
	peer_put(node);
}

void hmdfs_connections_stop(struct hmdfs_sb_info *sbi)
{
	struct hmdfs_peer *node = NULL;
	struct hmdfs_peer *con_tmp = NULL;

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list,
				 list) {
		mutex_unlock(&sbi->connections.node_lock);
		hmdfs_disconnect_node(node);
		hmdfs_del_peer(node);
		mutex_lock(&sbi->connections.node_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
}

struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type)
{
	struct connection *conn_impl = NULL;

	if (!node)
		return NULL;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == connect_type &&
		    conn_impl->status == CONNECT_STAT_WORKING) {
			connection_get(conn_impl);
			mutex_unlock(&node->conn_impl_list_lock);
			return conn_impl;
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
	hmdfs_err_ratelimited("no working connection found for device %llu, type %d",
			      node->device_id, connect_type);
	return NULL;
}

void set_conn_sock_quickack(struct hmdfs_peer *node)
{
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;
	int option = 1;

	if (!node)
		return;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == CONNECT_TYPE_TCP &&
		    conn_impl->status == CONNECT_STAT_WORKING &&
		    conn_impl->connect_handle) {
			tcp = (struct tcp_handle *)(conn_impl->connect_handle);
			tcp_sock_set_quickack(tcp->sock->sk, option);
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
}

struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi,
					   uint64_t device_id)
{
	struct hmdfs_peer *con = NULL;
	struct hmdfs_peer *lookup = NULL;

	if (!sbi)
		return NULL;
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(con, &sbi->connections.node_list, list) {
		if (con->status != NODE_STAT_ONLINE ||
		    con->device_id != device_id)
			continue;
		lookup = con;
		peer_get(lookup);
		break;
	}
	mutex_unlock(&sbi->connections.node_lock);
	return lookup;
}

struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi,
					 uint8_t *cid)
{
	struct hmdfs_peer *con = NULL;
	struct hmdfs_peer *lookup = NULL;

	if (!sbi)
		return NULL;
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(con, &sbi->connections.node_list, list) {
		if (strncmp(con->cid, cid, HMDFS_CID_SIZE) != 0)
			continue;
		lookup = con;
		peer_get(lookup);
		break;
	}
	mutex_unlock(&sbi->connections.node_lock);
	return lookup;
}

static struct hmdfs_peer *lookup_peer_by_cid_unsafe(struct hmdfs_sb_info *sbi,
						    uint8_t *cid)
{
	struct hmdfs_peer *node = NULL;

	list_for_each_entry(node, &sbi->connections.node_list, list)
		if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) {
			peer_get(node);
			return node;
		}
	return NULL;
}

static struct hmdfs_peer *add_peer_unsafe(struct hmdfs_sb_info *sbi,
					  struct hmdfs_peer *peer2add)
{
	struct hmdfs_peer *peer;
	int err;

	peer = lookup_peer_by_cid_unsafe(sbi, peer2add->cid);
	if (peer)
		return peer;

	err = hmdfs_register_peer_sysfs(sbi, peer2add);
	if (err) {
		hmdfs_err("register peer %llu sysfs err %d",
			  peer2add->device_id, err);
		return ERR_PTR(err);
	}
	list_add_tail(&peer2add->list, &sbi->connections.node_list);
	peer_get(peer2add);
	hmdfs_run_simple_evt_cb(peer2add, NODE_EVT_ADD);
	return peer2add;
}

static struct hmdfs_peer *alloc_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
				     const struct connection_operations *conn_operations,
				     uint32_t devsl)
{
	struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq);

	node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0,
					 sbi->seq, node->device_id);
	if (!node->async_wq) {
		hmdfs_err("Failed to alloc async wq");
		goto out_err;
	}
	node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu",
					      WQ_UNBOUND | WQ_MEM_RECLAIM,
					      sbi->async_req_max_active,
					      sbi->seq, node->device_id);
	if (!node->req_handle_wq) {
		hmdfs_err("Failed to alloc req wq");
		goto out_err;
	}
	node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu",
					  WQ_UNBOUND | WQ_MEM_RECLAIM,
					  0, sbi->seq, node->device_id);
	if (!node->dentry_wq) {
		hmdfs_err("Failed to alloc dentry wq");
		goto out_err;
	}
	node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu",
					    WQ_UNBOUND | WQ_MEM_RECLAIM,
					    HMDFS_RETRY_WB_WQ_MAX_ACTIVE,
					    sbi->seq, node->device_id);
	if (!node->retry_wb_wq) {
		hmdfs_err("Failed to alloc retry writeback wq");
		goto out_err;
	}
	node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu",
					      WQ_UNBOUND, 0,
					      sbi->seq, node->device_id);
	if (!node->reget_conn_wq) {
		hmdfs_err("Failed to alloc reget conn wq");
		goto out_err;
	}
	INIT_LIST_HEAD(&node->conn_impl_list);
	mutex_init(&node->conn_impl_list_lock);
	INIT_LIST_HEAD(&node->conn_deleting_list);
	init_waitqueue_head(&node->deleting_list_wq);
	idr_init(&node->msg_idr);
	spin_lock_init(&node->idr_lock);
	idr_init(&node->file_id_idr);
	spin_lock_init(&node->file_id_lock);
	INIT_LIST_HEAD(&node->list);
	kref_init(&node->ref_cnt);
	node->owner = sbi->seq;
	node->conn_operations = conn_operations;
	node->sbi = sbi;
	node->status = NODE_STAT_SHAKING;
	node->conn_time = jiffies;
	memcpy(node->cid, cid, HMDFS_CID_SIZE);
	atomic64_set(&node->sb_dirty_count, 0);
	node->fid_cookie = 0;
	atomic_set(&node->evt_seq, 0);
	mutex_init(&node->seq_lock);
	mutex_init(&node->offline_cb_lock);
	mutex_init(&node->evt_lock);
	node->pending_evt = RAW_NODE_EVT_NR;
	node->last_evt = RAW_NODE_EVT_NR;
	node->cur_evt[0] = RAW_NODE_EVT_NR;
	node->cur_evt[1] = RAW_NODE_EVT_NR;
	node->seq_wr_idx = (unsigned char)UINT_MAX;
	node->seq_rd_idx = node->seq_wr_idx;
	INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work);
	node->msg_idr_process = 0;
	node->offline_start = false;
	spin_lock_init(&node->wr_opened_inode_lock);
	INIT_LIST_HEAD(&node->wr_opened_inode_list);
	spin_lock_init(&node->stashed_inode_lock);
	node->stashed_inode_nr = 0;
	atomic_set(&node->rebuild_inode_status_nr, 0);
	init_waitqueue_head(&node->rebuild_inode_status_wq);
	INIT_LIST_HEAD(&node->stashed_inode_list);
	node->need_rebuild_stash_list = false;
	node->devsl = devsl;

	return node;

out_err:
	if (node->async_wq) {
		destroy_workqueue(node->async_wq);
		node->async_wq = NULL;
	}
	if (node->req_handle_wq) {
		destroy_workqueue(node->req_handle_wq);
		node->req_handle_wq = NULL;
	}
	if (node->dentry_wq) {
		destroy_workqueue(node->dentry_wq);
		node->dentry_wq = NULL;
	}
	if (node->retry_wb_wq) {
		destroy_workqueue(node->retry_wb_wq);
		node->retry_wb_wq = NULL;
	}
	if (node->reget_conn_wq) {
		destroy_workqueue(node->reget_conn_wq);
		node->reget_conn_wq = NULL;
	}
	kfree(node);
	return NULL;
}

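/*
 * Lookup-alloc-recheck pattern: the peer is allocated outside
 * connections.node_lock, then add_peer_unsafe() re-checks under the lock
 * and returns the already-registered peer if another thread won the race,
 * in which case the freshly allocated one is dropped.
 */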
struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
				  uint32_t devsl)
{
	struct hmdfs_peer *peer = NULL, *on_sbi_peer = NULL;
	const struct connection_operations *conn_opr_ptr = NULL;

	mutex_lock(&sbi->connections.node_lock);
	peer = lookup_peer_by_cid_unsafe(sbi, cid);
	mutex_unlock(&sbi->connections.node_lock);
	if (peer) {
		hmdfs_info("Got an existing peer: device_id = %llu",
			   peer->device_id);
		goto out;
	}

	conn_opr_ptr = hmdfs_get_peer_operation(DFS_2_0);
	if (unlikely(!conn_opr_ptr)) {
		hmdfs_info("Fatal! Cannot get peer operation");
		goto out;
	}
	peer = alloc_peer(sbi, cid, conn_opr_ptr, devsl);
	if (unlikely(!peer)) {
		hmdfs_info("Failed to alloc a peer");
		goto out;
	}

	mutex_lock(&sbi->connections.node_lock);
	on_sbi_peer = add_peer_unsafe(sbi, peer);
	mutex_unlock(&sbi->connections.node_lock);
	if (IS_ERR(on_sbi_peer)) {
		peer_put(peer);
		peer = NULL;
		goto out;
	} else if (unlikely(on_sbi_peer != peer)) {
		hmdfs_info("Got an existing peer: device_id = %llu",
			   on_sbi_peer->device_id);
		peer_put(peer);
		peer = on_sbi_peer;
	} else {
		hmdfs_info("Got a newly allocated peer: device_id = %llu",
			   peer->device_id);
	}

out:
	return peer;
}

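/*
 * Called via kref_put_lock() with the peer's idr_lock held; the id is
 * removed and the lock released here.
 */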
static void head_release(struct kref *kref)
{
	struct hmdfs_msg_idr_head *head;
	struct hmdfs_peer *con;

	head = (struct hmdfs_msg_idr_head *)container_of(kref,
			struct hmdfs_msg_idr_head, ref);
	con = head->peer;
	idr_remove(&con->msg_idr, head->msg_id);
	spin_unlock(&con->idr_lock);

	kfree(head);
}

void head_put(struct hmdfs_msg_idr_head *head)
{
	kref_put_lock(&head->ref, head_release, &head->peer->idr_lock);
}

struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer, int id)
{
	struct hmdfs_msg_idr_head *head = NULL;

	spin_lock(&peer->idr_lock);
	head = idr_find(&peer->msg_idr, id);
	if (head)
		kref_get(&head->ref);
	spin_unlock(&peer->idr_lock);

	return head;
}

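/*
 * Pre-2.0 peers carry message ids in a 16-bit field, hence the explicit
 * exclusive end of USHRT_MAX + 1; for newer peers an end of 0 is passed,
 * which idr_alloc_cyclic() treats as "no upper bound". No new ids are
 * handed out once offline_start is set.
 */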
int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
			void *ptr)
{
	int ret = -EAGAIN;
	struct hmdfs_msg_idr_head *head = ptr;
	int end = peer->version < DFS_2_0 ? (USHRT_MAX + 1) : 0;

	idr_preload(GFP_KERNEL);
	spin_lock(&peer->idr_lock);
	if (!peer->offline_start)
		ret = idr_alloc_cyclic(&peer->msg_idr, ptr,
				       1, end, GFP_NOWAIT);
	if (ret >= 0) {
		kref_init(&head->ref);
		head->msg_id = ret;
		head->type = type;
		head->peer = peer;
		peer->msg_idr_process++;
		ret = 0;
	}
	spin_unlock(&peer->idr_lock);
	idr_preload_end();

	return ret;
}