// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/comm/connection.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "connection.h"

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include "device_node.h"
#include "hmdfs.h"
#include "message_verify.h"
#include "node_cb.h"
#include "protocol.h"
#include "socket_adapter.h"

#ifdef CONFIG_HMDFS_FS_ENCRYPTION
#include "crypto.h"
#endif

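/* bounds for usleep_range(), in microseconds, while draining in-flight requests */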
#define HMDFS_WAIT_REQUEST_END_MIN 20
#define HMDFS_WAIT_REQUEST_END_MAX 30

#define HMDFS_WAIT_CONN_RELEASE (3 * HZ)

#define HMDFS_RETRY_WB_WQ_MAX_ACTIVE 16

static void hs_fill_crypto_data(struct connection *conn_impl, __u8 ops,
				void *data, __u32 len)
{
	struct crypto_body *body = NULL;

	if (len < sizeof(struct crypto_body)) {
		hmdfs_info("crypto body len %u is err", len);
		return;
	}
	body = (struct crypto_body *)data;

	/* for test only; fill in the right algorithm later. */
	body->crypto |= HMDFS_HS_CRYPTO_KTLS_AES128;
	body->crypto = cpu_to_le32(body->crypto);

	hmdfs_info("fill crypto. crypto=0x%08x", body->crypto);
}

static int hs_parse_crypto_data(struct connection *conn_impl, __u8 ops,
				 void *data, __u32 len)
{
	struct crypto_body *hs_crypto = NULL;
	uint32_t crypto;

	if (len < sizeof(struct crypto_body)) {
		hmdfs_info("handshake msg len error, len=%u", len);
		return -1;
	}
	hs_crypto = (struct crypto_body *)data;
	/* the filler encodes this field with cpu_to_le32(), so decode 32 bits */
	crypto = le32_to_cpu(hs_crypto->crypto);
	conn_impl->crypto = crypto;
	hmdfs_info("ops=%u, len=%u, crypto=0x%08x", ops, len, crypto);
	return 0;
}

static void hs_fill_case_sense_data(struct connection *conn_impl, __u8 ops,
				    void *data, __u32 len)
{
	struct case_sense_body *body = (struct case_sense_body *)data;

	if (len < sizeof(struct case_sense_body)) {
		hmdfs_err("case sensitive len %u is err", len);
		return;
	}
	body->case_sensitive = conn_impl->node->sbi->s_case_sensitive;
}

static int hs_parse_case_sense_data(struct connection *conn_impl, __u8 ops,
				     void *data, __u32 len)
{
	struct case_sense_body *body = (struct case_sense_body *)data;
	__u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0;

	if (len < sizeof(struct case_sense_body)) {
		hmdfs_info("case sensitive len %u is err", len);
		return -1;
	}
	if (body->case_sensitive != sensitive) {
		hmdfs_err("case sensitive inconsistent, server: %u, client: %u, ops: %u",
			  body->case_sensitive, sensitive, ops);
		return -1;
	}
	return 0;
}

static void hs_fill_feature_data(struct connection *conn_impl, __u8 ops,
				 void *data, __u32 len)
{
	struct feature_body *body = (struct feature_body *)data;

	if (len < sizeof(struct feature_body)) {
		hmdfs_err("feature len %u is err", len);
		return;
	}
	body->features = cpu_to_le64(conn_impl->node->sbi->s_features);
	body->reserved = cpu_to_le64(0);
}

static int hs_parse_feature_data(struct connection *conn_impl, __u8 ops,
				 void *data, __u32 len)
{
	struct feature_body *body = (struct feature_body *)data;

	if (len < sizeof(struct feature_body)) {
		hmdfs_err("feature len %u is err", len);
		return -1;
	}

	conn_impl->node->features = le64_to_cpu(body->features);
	return 0;
}

/* should ensure len is smaller than 0xffff. */
static const struct conn_hs_extend_reg s_hs_extend_reg[HS_EXTEND_CODE_COUNT] = {
	[HS_EXTEND_CODE_CRYPTO] = {
		.len = sizeof(struct crypto_body),
		.resv = 0,
		.filler = hs_fill_crypto_data,
		.parser = hs_parse_crypto_data
	},
	[HS_EXTEND_CODE_CASE_SENSE] = {
		.len = sizeof(struct case_sense_body),
		.resv = 0,
		.filler = hs_fill_case_sense_data,
		.parser = hs_parse_case_sense_data,
	},
	[HS_EXTEND_CODE_FEATURE_SUPPORT] = {
		.len = sizeof(struct feature_body),
		.resv = 0,
		.filler = hs_fill_feature_data,
		.parser = hs_parse_feature_data,
	},
};

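/*
 * Extend data wire layout built and parsed below (a sketch; the exact
 * field types live in the struct definitions):
 *
 *   struct conn_hs_extend_head    field_cn (le32), ...
 *   field_cn entries of:
 *     struct extend_field_head    code (le16), len (le16)
 *     body                        len bytes, handled by s_hs_extend_reg[code]
 */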
static __u32 hs_get_extend_data_len(void)
{
	__u32 len;
	int i;

	len = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) {
		len += sizeof(struct extend_field_head);
		len += s_hs_extend_reg[i].len;
	}

	hmdfs_info("extend data total len is %u", len);
	return len;
}

static void hs_fill_extend_data(struct connection *conn_impl, __u8 ops,
				void *extend_data, __u32 len)
{
	struct conn_hs_extend_head *extend_head = NULL;
	struct extend_field_head *field = NULL;
	uint8_t *body = NULL;
	__u32 offset;
	__u16 i;

	if (sizeof(struct conn_hs_extend_head) > len) {
		hmdfs_info("len error. len=%u", len);
		return;
	}
	extend_head = (struct conn_hs_extend_head *)extend_data;
	extend_head->field_cn = 0;
	offset = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < HS_EXTEND_CODE_COUNT; i++) {
		if (sizeof(struct extend_field_head) > (len - offset))
			break;
		field = (struct extend_field_head *)((uint8_t *)extend_data +
						     offset);
		offset += sizeof(struct extend_field_head);

		if (s_hs_extend_reg[i].len > (len - offset))
			break;
		body = (uint8_t *)extend_data + offset;
		offset += s_hs_extend_reg[i].len;

		field->code = cpu_to_le16(i);
		field->len = cpu_to_le16(s_hs_extend_reg[i].len);

		if (s_hs_extend_reg[i].filler)
			s_hs_extend_reg[i].filler(conn_impl, ops,
					body, s_hs_extend_reg[i].len);

		extend_head->field_cn += 1;
	}

	extend_head->field_cn = cpu_to_le32(extend_head->field_cn);
}

static int hs_parse_extend_data(struct connection *conn_impl, __u8 ops,
				void *extend_data, __u32 extend_len)
{
	struct conn_hs_extend_head *extend_head = NULL;
	struct extend_field_head *field = NULL;
	uint8_t *body = NULL;
	__u32 offset;
	__u32 field_cnt;
	__u16 code;
	__u16 len;
	int i;
	int ret;

	if (sizeof(struct conn_hs_extend_head) > extend_len) {
		hmdfs_err("ops=%u,extend_len=%u", ops, extend_len);
		return -1;
	}
	extend_head = (struct conn_hs_extend_head *)extend_data;
	field_cnt = le32_to_cpu(extend_head->field_cn);
	hmdfs_info("extend_len=%u,field_cnt=%u", extend_len, field_cnt);

	offset = sizeof(struct conn_hs_extend_head);

	for (i = 0; i < field_cnt; i++) {
		if (sizeof(struct extend_field_head) > (extend_len - offset)) {
			hmdfs_err("cnt err, op=%u, extend_len=%u, cnt=%u, i=%u",
				  ops, extend_len, field_cnt, i);
			return -1;
		}
		field = (struct extend_field_head *)((uint8_t *)extend_data +
						     offset);
		offset += sizeof(struct extend_field_head);
		code = le16_to_cpu(field->code);
		len = le16_to_cpu(field->len);
		if (len > (extend_len - offset)) {
			hmdfs_err("len err, op=%u, extend_len=%u, cnt=%u, i=%u",
				  ops, extend_len, field_cnt, i);
			hmdfs_err("len err, code=%u, len=%u, offset=%u", code,
				  len, offset);
			return -1;
		}

		body = (uint8_t *)extend_data + offset;
		offset += len;
		if ((code < HS_EXTEND_CODE_COUNT) &&
		    (s_hs_extend_reg[code].parser)) {
			ret = s_hs_extend_reg[code].parser(conn_impl, ops,
							   body, len);
			if (ret)
				return ret;
		}
	}
	return 0;
}

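/*
 * Note: field codes at or above HS_EXTEND_CODE_COUNT are skipped rather
 * than rejected, so an older node can still parse extend data sent by a
 * newer peer that appends fields.
 */
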
static int hs_proc_msg_data(struct connection *conn_impl, __u8 ops, void *data,
			    __u32 data_len)
{
	struct connection_handshake_req *hs_req = NULL;
	uint8_t *extend_data = NULL;
	__u32 extend_len;
	__u32 req_len;
	int ret;

	if (!data) {
		hmdfs_err("err, msg data is null");
		return -1;
	}

	if (data_len < sizeof(struct connection_handshake_req)) {
		hmdfs_err("ack msg data len error. data_len=%u, device_id=%llu",
			  data_len, conn_impl->node->device_id);
		return -1;
	}

	hs_req = (struct connection_handshake_req *)data;
	req_len = le32_to_cpu(hs_req->len);
	if (req_len > (data_len - sizeof(struct connection_handshake_req))) {
		hmdfs_info(
			"ack msg hs_req len(%u) error. data_len=%u, device_id=%llu",
			req_len, data_len, conn_impl->node->device_id);
		return -1;
	}
	extend_len =
		data_len - sizeof(struct connection_handshake_req) - req_len;
	extend_data = (uint8_t *)data +
		      sizeof(struct connection_handshake_req) + req_len;
	ret = hs_parse_extend_data(conn_impl, ops, extend_data, extend_len);
	if (!ret)
		hmdfs_info(
			"hs msg rcv, ops=%u, data_len=%u, device_id=%llu, req_len=%u",
			ops, data_len, conn_impl->node->device_id, req_len);
	return ret;
}
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
static int connection_handshake_init_tls(struct connection *conn_impl, __u8 ops)
{
	// init ktls config; key1/key2 are the initial write keys of the two directions
	__u8 key1[HMDFS_KEY_SIZE];
	__u8 key2[HMDFS_KEY_SIZE];
	int ret;

	if ((ops != CONNECT_MESG_HANDSHAKE_RESPONSE) &&
	    (ops != CONNECT_MESG_HANDSHAKE_ACK)) {
		hmdfs_err("ops %u is err", ops);
		return -EINVAL;
	}

	update_key(conn_impl->master_key, key1, HKDF_TYPE_KEY_INITIATOR);
	update_key(conn_impl->master_key, key2, HKDF_TYPE_KEY_ACCEPTER);

	if (ops == CONNECT_MESG_HANDSHAKE_ACK) {
		memcpy(conn_impl->send_key, key1, HMDFS_KEY_SIZE);
		memcpy(conn_impl->recv_key, key2, HMDFS_KEY_SIZE);
	} else {
		memcpy(conn_impl->send_key, key2, HMDFS_KEY_SIZE);
		memcpy(conn_impl->recv_key, key1, HMDFS_KEY_SIZE);
	}

	memset(key1, 0, HMDFS_KEY_SIZE);
	memset(key2, 0, HMDFS_KEY_SIZE);

	hmdfs_info("hs: ops=%u start set crypto tls", ops);
	ret = tls_crypto_info_init(conn_impl);
	if (ret)
		hmdfs_err("setting tls fail. ops is %u", ops);

	return ret;
}
#endif

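/*
 * Direction note (derived from the branches above): the peer that
 * receives the RESPONSE (and will send the ACK) ends up with
 * send_key = key2 and recv_key = key1, while the peer that receives the
 * ACK gets the mirror image, so each side's send_key pairs with the
 * other side's recv_key.
 */
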
static int do_send_handshake(struct connection *conn_impl, __u8 ops,
			     __le16 request_id)
{
	int err;
	struct connection_msg_head *hs_head = NULL;
	struct connection_handshake_req *hs_data = NULL;
	uint8_t *hs_extend_data = NULL;
	struct hmdfs_send_data msg;
	__u32 send_len;
	__u32 len;
	__u32 extend_len = 0;
	char buf[HMDFS_CID_SIZE] = { 0 };

	len = scnprintf(buf, HMDFS_CID_SIZE, "%llu", 0ULL);
	send_len = sizeof(struct connection_msg_head) +
		   sizeof(struct connection_handshake_req) + len;

	if ((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) ||
	    (ops == CONNECT_MESG_HANDSHAKE_ACK)) {
		extend_len = hs_get_extend_data_len();
		send_len += extend_len;
	}

	hs_head = kzalloc(send_len, GFP_KERNEL);
	if (!hs_head)
		return -ENOMEM;

	hs_data = (struct connection_handshake_req *)
		  ((uint8_t *)hs_head + sizeof(struct connection_msg_head));

	hs_data->len = cpu_to_le32(len);
	memcpy(hs_data->dev_id, buf, len);

	if ((ops == CONNECT_MESG_HANDSHAKE_RESPONSE) ||
	    (ops == CONNECT_MESG_HANDSHAKE_ACK)) {
		hs_extend_data = (uint8_t *)hs_data +
				  sizeof(struct connection_handshake_req) + len;
		hs_fill_extend_data(conn_impl, ops, hs_extend_data, extend_len);
	}

	hs_head->magic = HMDFS_MSG_MAGIC;
	hs_head->version = HMDFS_VERSION;
	hs_head->flags |= 0x1;
	hmdfs_info("Send handshake message: ops = %d, fd = %d", ops,
		   ((struct tcp_handle *)(conn_impl->connect_handle))->fd);
	hs_head->operations = ops;
	hs_head->request_id = request_id;
	hs_head->datasize = cpu_to_le32(send_len);
	hs_head->source = 0;
	hs_head->msg_id = 0;

	msg.head = hs_head;
	msg.head_len = sizeof(struct connection_msg_head);
	msg.data = hs_data;
	msg.len = send_len - msg.head_len;
	msg.sdesc = NULL;
	msg.sdesc_len = 0;
	err = conn_impl->send_message(conn_impl, &msg);
	kfree(hs_head);
	return err;
}

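/*
 * On-wire handshake message assembled above (sketch):
 *
 *   struct connection_msg_head        magic, version, ops, request_id, ...
 *   struct connection_handshake_req   len (le32), dev_id[len]
 *   extend data                       RESPONSE/ACK only, laid out by
 *                                     hs_fill_extend_data()
 */
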
static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node)
{
	int sum = 0;
	int i;

	for (i = 0; i < RAW_NODE_EVT_NR; i++)
		sum += node->waiting_evt[i];

	return sum;
}

static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt,
					 unsigned int *seq)
{
	int last;
	int sum;
	unsigned int next;

	sum = hmdfs_node_waiting_evt_sum(node);
	if (sum % RAW_NODE_EVT_NR)
		last = !node->pending_evt;
	else
		last = node->pending_evt;

	/* duplicated event */
	if (evt == last) {
		node->dup_evt[evt]++;
		return 0;
	}

	node->waiting_evt[evt]++;
	hmdfs_debug("add node->waiting_evt[%d]=%d", evt,
		    node->waiting_evt[evt]);

	/* offline wait + online wait + offline wait = offline wait
	 * online wait + offline wait + online wait != online wait
	 * because resources tied to the first online (e.g. fds) must
	 * still be invalidated
	 */
	if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 &&
	    node->waiting_evt[RAW_NODE_EVT_ON] >= 1) {
		node->waiting_evt[RAW_NODE_EVT_OFF] -= 1;
		node->waiting_evt[RAW_NODE_EVT_ON] -= 1;
		node->seq_wr_idx -= 2;
		node->merged_evt += 2;
	}

	next = hmdfs_node_inc_evt_seq(node);
	node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next;
	*seq = next;

	return 1;
}

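/*
 * Example of the merge rule above (a sketch): once the waiting queue
 * holds OFF, ON, OFF, the leading OFF+ON pair is dropped and a single
 * waiting OFF remains; ON, OFF, ON is never merged this way, because
 * resources created after the first ON are already stale and must still
 * be invalidated.
 */
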
static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt,
				       bool sync, unsigned int seq)
{
	int evt = (raw_evt == RAW_NODE_EVT_OFF) ? NODE_EVT_OFFLINE :
						  NODE_EVT_ONLINE;
	int cur_evt_idx = sync ? 1 : 0;

	node->cur_evt[cur_evt_idx] = raw_evt;
	node->cur_evt_seq[cur_evt_idx] = seq;
	hmdfs_node_call_evt_cb(node, evt, sync, seq);
	node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR;
}

static void hmdfs_node_evt_work(struct work_struct *work)
{
	struct hmdfs_peer *node =
		container_of(work, struct hmdfs_peer, evt_dwork.work);
	unsigned int seq;

	/*
	 * N-th sync cb completes before N-th async cb,
	 * so use seq_lock as a barrier in read & write path
	 * to ensure we can read the required seq.
	 */
	mutex_lock(&node->seq_lock);
	seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR];
	hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq);
	mutex_unlock(&node->seq_lock);

	mutex_lock(&node->evt_lock);
	if (hmdfs_node_waiting_evt_sum(node)) {
		node->pending_evt = !node->pending_evt;
		node->pending_evt_seq =
			node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR];
		node->waiting_evt[node->pending_evt]--;
		/* sync cb has been done */
		schedule_delayed_work(&node->evt_dwork,
				      node->sbi->async_cb_delay * HZ);
	} else {
		node->last_evt = node->pending_evt;
		node->pending_evt = RAW_NODE_EVT_NR;
	}
	mutex_unlock(&node->evt_lock);
}

/*
 * The running order of the callbacks is:
 *
 * (1) sync callbacks are invoked according to the queue order of raw events:
 *     ensured by seq_lock.
 * (2) async callbacks are invoked according to the queue order of raw events:
 *     ensured by evt_lock & evt_dwork.
 * (3) async callback is invoked after sync callback of the same raw event:
 *     ensured by seq_lock.
 * (4) async callback of N-th raw event and sync callback of (N+x)-th raw
 *     event can run concurrently.
 */
static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt)
{
	unsigned int seq = 0;

	mutex_lock(&node->evt_lock);
	if (node->pending_evt == RAW_NODE_EVT_NR) {
		if (evt == node->last_evt) {
			node->dup_evt[evt]++;
			mutex_unlock(&node->evt_lock);
			return;
		}
		node->pending_evt = evt;
		seq = hmdfs_node_inc_evt_seq(node);
		node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq;
		node->pending_evt_seq = seq;
		mutex_lock(&node->seq_lock);
		mutex_unlock(&node->evt_lock);
		/* call sync cb, then async cb */
		hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
		mutex_unlock(&node->seq_lock);
		schedule_delayed_work(&node->evt_dwork,
				      node->sbi->async_cb_delay * HZ);
	} else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) {
		/*
		 * Take seq_lock first to ensure the N-th sync cb
		 * is called before the N-th async cb.
		 */
		mutex_lock(&node->seq_lock);
		mutex_unlock(&node->evt_lock);
		hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
		mutex_unlock(&node->seq_lock);
	} else {
		mutex_unlock(&node->evt_lock);
	}
}

void connection_send_handshake(struct connection *conn_impl, __u8 ops,
			       __le16 request_id)
{
	struct tcp_handle *tcp = NULL;
	int err = do_send_handshake(conn_impl, ops, request_id);

	if (likely(err >= 0))
		return;

	tcp = conn_impl->connect_handle;
	hmdfs_err("Failed to send handshake: err = %d, fd = %d", err, tcp->fd);
	hmdfs_reget_connection(conn_impl);
}

void connection_handshake_notify(struct hmdfs_peer *node, int notify_type)
{
	struct notify_param param;

	param.notify = notify_type;
	param.fd = INVALID_SOCKET_FD;
	memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE);
	notify(node, &param);
}

void peer_online(struct hmdfs_peer *peer)
{
	// check whether someone else has already made the peer online
	u8 prev_stat = xchg(&peer->status, NODE_STAT_ONLINE);
	unsigned long jif_tmp = jiffies;

	if (prev_stat == NODE_STAT_ONLINE)
		return;
	WRITE_ONCE(peer->conn_time, jif_tmp);
	WRITE_ONCE(peer->sbi->connections.recent_ol, jif_tmp);
	hmdfs_queue_raw_node_evt(peer, RAW_NODE_EVT_ON);
}

void connection_to_working(struct hmdfs_peer *node)
{
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	if (!node)
		return;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == CONNECT_TYPE_TCP &&
		    conn_impl->status == CONNECT_STAT_WAIT_RESPONSE) {
			tcp = conn_impl->connect_handle;
			hmdfs_info("fd %d to working", tcp->fd);
			conn_impl->status = CONNECT_STAT_WORKING;
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
	peer_online(node);
}

void connection_handshake_recv_handler(struct connection *conn_impl, void *buf,
				       void *data, __u32 data_len)
{
	__u8 ops;
	__u8 status;
	int fd = ((struct tcp_handle *)(conn_impl->connect_handle))->fd;
	struct connection_msg_head *head = (struct connection_msg_head *)buf;
	int ret;

	if (head->version != HMDFS_VERSION)
		goto out;

	conn_impl->node->version = head->version;
	ops = head->operations;
	status = conn_impl->status;
	switch (ops) {
	case CONNECT_MESG_HANDSHAKE_REQUEST:
		hmdfs_info(
			"Received handshake request: device_id = %llu, head->datasize = %d, tcp->fd = %d",
			conn_impl->node->device_id, head->datasize, fd);
		connection_send_handshake(conn_impl,
					  CONNECT_MESG_HANDSHAKE_RESPONSE,
					  head->msg_id);
		conn_impl->status = CONNECT_STAT_WAIT_ACK;
		conn_impl->node->status = NODE_STAT_SHAKING;
		break;
	case CONNECT_MESG_HANDSHAKE_RESPONSE:
		hmdfs_info(
			"Received handshake response: device_id = %llu, conn status = %hhu, tcp->fd = %d",
			conn_impl->node->device_id, status, fd);
		if (status == CONNECT_STAT_WAIT_REQUEST) {
			// must be a 10.1 device, no need to set ktls
			connection_to_working(conn_impl->node);
			goto out;
		}

		ret = hs_proc_msg_data(conn_impl, ops, data, data_len);
		if (ret)
			goto nego_err;
		connection_send_handshake(conn_impl,
					  CONNECT_MESG_HANDSHAKE_ACK,
					  head->msg_id);
		hmdfs_info("response rcv handle, conn_impl->crypto=0x%0x",
				conn_impl->crypto);
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
		ret = connection_handshake_init_tls(conn_impl, ops);
		if (ret) {
			hmdfs_err("init_tls_key fail, ops %u", ops);
			goto out;
		}
#endif

		conn_impl->status = CONNECT_STAT_WORKING;
		peer_online(conn_impl->node);
		break;
	case CONNECT_MESG_HANDSHAKE_ACK:
		ret = hs_proc_msg_data(conn_impl, ops, data, data_len);
		if (ret)
			goto nego_err;
		hmdfs_info("ack rcv handle, conn_impl->crypto=0x%0x",
				conn_impl->crypto);
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
		ret = connection_handshake_init_tls(conn_impl, ops);
		if (ret) {
			hmdfs_err("init_tls_key fail, ops %u", ops);
			goto out;
		}
#endif
		conn_impl->status = CONNECT_STAT_WORKING;
		peer_online(conn_impl->node);
		break;
	default:
		break;
	}
out:
	kfree(data);
	return;
nego_err:
	conn_impl->status = CONNECT_STAT_NEGO_FAIL;
	connection_handshake_notify(conn_impl->node, NOTIFY_OFFLINE);
	hmdfs_err("protocol negotiation failed, remote device_id = %llu, tcp->fd = %d",
		  conn_impl->node->device_id, fd);
	goto out;
}

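/*
 * Handshake sequence implied by the handler above (a sketch; the left
 * peer is the one that sent the initial REQUEST):
 *
 *   REQUEST            ------>   send RESPONSE, enter WAIT_ACK
 *   recv RESPONSE,     <------
 *   send ACK, set up
 *   ktls, WORKING      ------>   recv ACK, set up ktls, WORKING
 *
 * A RESPONSE arriving while this side is still in
 * CONNECT_STAT_WAIT_REQUEST is treated as a legacy peer and moved
 * straight to working, skipping ktls.
 */
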
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
static void update_tls_crypto_key(struct connection *conn,
				  struct hmdfs_head_cmd *head, void *data,
				  __u32 data_len)
{
	// rekey message handler
	struct connection_rekey_request *rekey_req = NULL;
	int ret = 0;

	if (hmdfs_message_verify(conn->node, head, data) < 0) {
		hmdfs_err("Rekey msg %d has been abandoned", head->msg_id);
		goto out_err;
	}

	hmdfs_info("recv REKEY request");
	set_crypto_info(conn, SET_CRYPTO_RECV);
	// update send key if requested
	rekey_req = data;
	if (le32_to_cpu(rekey_req->update_request) == UPDATE_REQUESTED) {
		ret = tcp_send_rekey_request(conn);
		if (ret == 0)
			set_crypto_info(conn, SET_CRYPTO_SEND);
	}
out_err:
	kfree(data);
}

static bool cmd_update_tls_crypto_key(struct connection *conn,
				      struct hmdfs_head_cmd *head)
{
	struct tcp_handle *tcp = conn->connect_handle;

	if (conn->type != CONNECT_TYPE_TCP || !tcp)
		return false;
	return head->operations.command == F_CONNECT_REKEY;
}
#endif

void connection_working_recv_handler(struct connection *conn_impl, void *buf,
				     void *data, __u32 data_len)
{
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
	if (cmd_update_tls_crypto_key(conn_impl, buf)) {
		update_tls_crypto_key(conn_impl, buf, data, data_len);
		return;
	}
#endif
	hmdfs_recv_mesg_callback(conn_impl->node, buf, data);
}

static void connection_release(struct kref *ref)
{
	struct tcp_handle *tcp = NULL;
	struct connection *conn = container_of(ref, struct connection, ref_cnt);

	hmdfs_info("connection release");
	memset(conn->master_key, 0, HMDFS_KEY_SIZE);
	memset(conn->send_key, 0, HMDFS_KEY_SIZE);
	memset(conn->recv_key, 0, HMDFS_KEY_SIZE);
	if (conn->close)
		conn->close(conn);
	tcp = conn->connect_handle;
	crypto_free_aead(conn->tfm);
	// need to check and test: fput(tcp->sock->file);
	if (tcp && tcp->sock) {
		hmdfs_info("connection release: fd = %d, refcount %ld", tcp->fd,
			   file_count(tcp->sock->file));
		sockfd_put(tcp->sock);
	}
	if (tcp && tcp->recv_cache)
		kmem_cache_destroy(tcp->recv_cache);

	if (!list_empty(&conn->list)) {
		mutex_lock(&conn->node->conn_impl_list_lock);
		list_del(&conn->list);
		mutex_unlock(&conn->node->conn_impl_list_lock);
		/*
		 * wake up hmdfs_disconnect_node to check
		 * whether conn_deleting_list is empty.
		 */
		wake_up_interruptible(&conn->node->deleting_list_wq);
	}

	kfree(tcp);
	kfree(conn);
}

static void hmdfs_peer_release(struct kref *ref)
{
	struct hmdfs_peer *peer = container_of(ref, struct hmdfs_peer, ref_cnt);
	struct mutex *lock = &peer->sbi->connections.node_lock;

	if (!list_empty(&peer->list))
		hmdfs_info("releasing an on-sbi peer: device_id %llu",
			   peer->device_id);
	else
		hmdfs_info("releasing a redundant peer: device_id %llu",
			   peer->device_id);

	cancel_delayed_work_sync(&peer->evt_dwork);
	list_del(&peer->list);
	idr_destroy(&peer->msg_idr);
	idr_destroy(&peer->file_id_idr);
	flush_workqueue(peer->req_handle_wq);
	flush_workqueue(peer->async_wq);
	flush_workqueue(peer->retry_wb_wq);
	destroy_workqueue(peer->dentry_wq);
	destroy_workqueue(peer->req_handle_wq);
	destroy_workqueue(peer->async_wq);
	destroy_workqueue(peer->retry_wb_wq);
	destroy_workqueue(peer->reget_conn_wq);
	kfree(peer);
	mutex_unlock(lock);
}

void connection_put(struct connection *conn)
{
	struct mutex *lock = &conn->ref_lock;

	kref_put_mutex(&conn->ref_cnt, connection_release, lock);
}

void peer_put(struct hmdfs_peer *peer)
{
	struct mutex *lock = &peer->sbi->connections.node_lock;

	kref_put_mutex(&peer->ref_cnt, hmdfs_peer_release, lock);
}

static void hmdfs_dump_deleting_list(struct hmdfs_peer *node)
{
	struct connection *con = NULL;
	struct tcp_handle *tcp = NULL;
	int count = 0;

	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(con, &node->conn_deleting_list, list) {
		tcp = con->connect_handle;
		hmdfs_info("deleting list %d: device_id %llu tcp_fd %d refcnt %d",
			   count, node->device_id, tcp ? tcp->fd : -1,
			   kref_read(&con->ref_cnt));
		count++;
	}
	mutex_unlock(&node->conn_impl_list_lock);
}

static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node)
{
	bool empty = false;

	mutex_lock(&node->conn_impl_list_lock);
	empty = list_empty(&node->conn_deleting_list);
	mutex_unlock(&node->conn_impl_list_lock);

	return empty;
}

void hmdfs_disconnect_node(struct hmdfs_peer *node)
{
	LIST_HEAD(local_conns);
	struct connection *conn_impl = NULL;
	struct connection *next = NULL;
	struct tcp_handle *tcp = NULL;

	if (unlikely(!node))
		return;

	hmdfs_node_inc_evt_seq(node);
	/* Refer to comments in hmdfs_is_node_offlined() */
	smp_mb__after_atomic();
	node->status = NODE_STAT_OFFLINE;
	hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id);

	mutex_lock(&node->conn_impl_list_lock);
	if (!list_empty(&node->conn_impl_list))
		list_replace_init(&node->conn_impl_list, &local_conns);
	mutex_unlock(&node->conn_impl_list_lock);

	list_for_each_entry_safe(conn_impl, next, &local_conns, list) {
		tcp = conn_impl->connect_handle;
		if (tcp && tcp->sock) {
			kernel_sock_shutdown(tcp->sock, SHUT_RDWR);
			hmdfs_info("shutdown sock: fd = %d, refcount %ld",
				   tcp->fd, file_count(tcp->sock->file));
		}
		if (tcp)
			tcp->fd = INVALID_SOCKET_FD;

		tcp_close_socket(tcp);
		list_del_init(&conn_impl->list);

		connection_put(conn_impl);
	}

	if (wait_event_interruptible_timeout(node->deleting_list_wq,
					hmdfs_conn_deleting_list_empty(node),
					HMDFS_WAIT_CONN_RELEASE) <= 0)
		hmdfs_dump_deleting_list(node);

	/* wait for all request processing to end */
	spin_lock(&node->idr_lock);
	while (node->msg_idr_process) {
		spin_unlock(&node->idr_lock);
		usleep_range(HMDFS_WAIT_REQUEST_END_MIN,
			     HMDFS_WAIT_REQUEST_END_MAX);
		spin_lock(&node->idr_lock);
	}
	spin_unlock(&node->idr_lock);

	hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF);
}

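/*
 * Teardown order used above (sketch): mark the node offline, detach the
 * live connections onto a private list so new lookups miss them, shut
 * down and close each socket, wait for conn_deleting_list to drain,
 * then spin until all in-flight requests finish before queueing the
 * OFF event.
 */
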
static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt)
{
	unsigned int seq = hmdfs_node_inc_evt_seq(node);

	mutex_lock(&node->seq_lock);
	hmdfs_node_call_evt_cb(node, evt, true, seq);
	mutex_unlock(&node->seq_lock);
}

static void hmdfs_del_peer(struct hmdfs_peer *node)
{
	/*
	 * No need for an offline evt cb, because all files must
	 * have been flushed and closed; otherwise the filesystem
	 * could not be unmounted.
	 */
	cancel_delayed_work_sync(&node->evt_dwork);

	hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL);

	hmdfs_release_peer_sysfs(node);

	flush_workqueue(node->reget_conn_wq);
	peer_put(node);
}

void hmdfs_connections_stop(struct hmdfs_sb_info *sbi)
{
	struct hmdfs_peer *node = NULL;
	struct hmdfs_peer *con_tmp = NULL;

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list,
				  list) {
		mutex_unlock(&sbi->connections.node_lock);
		hmdfs_disconnect_node(node);
		hmdfs_del_peer(node);
		mutex_lock(&sbi->connections.node_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
}

struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type)
{
	struct connection *conn_impl = NULL;

	if (!node)
		return NULL;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == connect_type &&
		    conn_impl->status == CONNECT_STAT_WORKING) {
			connection_get(conn_impl);
			mutex_unlock(&node->conn_impl_list_lock);
			return conn_impl;
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
	hmdfs_err_ratelimited("cannot find connection for device %llu, type %d",
			      node->device_id, connect_type);
	return NULL;
}

void set_conn_sock_quickack(struct hmdfs_peer *node)
{
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;
	int option = 1;

	if (!node)
		return;
	mutex_lock(&node->conn_impl_list_lock);
	list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
		if (conn_impl->type == CONNECT_TYPE_TCP &&
		    conn_impl->status == CONNECT_STAT_WORKING &&
		    conn_impl->connect_handle) {
			tcp = (struct tcp_handle *)(conn_impl->connect_handle);
			tcp_sock_set_quickack(tcp->sock->sk, option);
		}
	}
	mutex_unlock(&node->conn_impl_list_lock);
}

struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi,
					   uint64_t device_id)
{
	struct hmdfs_peer *con = NULL;
	struct hmdfs_peer *lookup = NULL;

	if (!sbi)
		return NULL;
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(con, &sbi->connections.node_list, list) {
		if (con->status != NODE_STAT_ONLINE ||
		    con->device_id != device_id)
			continue;
		lookup = con;
		peer_get(lookup);
		break;
	}
	mutex_unlock(&sbi->connections.node_lock);
	return lookup;
}

struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi,
					 uint8_t *cid)
{
	struct hmdfs_peer *con = NULL;
	struct hmdfs_peer *lookup = NULL;

	if (!sbi)
		return NULL;
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(con, &sbi->connections.node_list, list) {
		if (strncmp(con->cid, cid, HMDFS_CID_SIZE) != 0)
			continue;
		lookup = con;
		peer_get(lookup);
		break;
	}
	mutex_unlock(&sbi->connections.node_lock);
	return lookup;
}

static struct hmdfs_peer *lookup_peer_by_cid_unsafe(struct hmdfs_sb_info *sbi,
						    uint8_t *cid)
{
	struct hmdfs_peer *node = NULL;

	list_for_each_entry(node, &sbi->connections.node_list, list)
		if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) {
			peer_get(node);
			return node;
		}
	return NULL;
}

static struct hmdfs_peer *add_peer_unsafe(struct hmdfs_sb_info *sbi,
					  struct hmdfs_peer *peer2add)
{
	struct hmdfs_peer *peer;
	int err;

	peer = lookup_peer_by_cid_unsafe(sbi, peer2add->cid);
	if (peer)
		return peer;

	err = hmdfs_register_peer_sysfs(sbi, peer2add);
	if (err) {
		hmdfs_err("register peer %llu sysfs err %d",
			  peer2add->device_id, err);
		return ERR_PTR(err);
	}
	list_add_tail(&peer2add->list, &sbi->connections.node_list);
	peer_get(peer2add);
	hmdfs_run_simple_evt_cb(peer2add, NODE_EVT_ADD);
	return peer2add;
}

static struct hmdfs_peer *alloc_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
	uint32_t devsl)
{
	struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return NULL;

	node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq);

	node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0,
					 sbi->seq, node->device_id);
	if (!node->async_wq) {
		hmdfs_err("Failed to alloc async wq");
		goto out_err;
	}
	node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu",
					      WQ_UNBOUND | WQ_MEM_RECLAIM,
					      sbi->async_req_max_active,
					      sbi->seq, node->device_id);
	if (!node->req_handle_wq) {
		hmdfs_err("Failed to alloc req wq");
		goto out_err;
	}
	node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu",
					   WQ_UNBOUND | WQ_MEM_RECLAIM,
					   0, sbi->seq, node->device_id);
	if (!node->dentry_wq) {
		hmdfs_err("Failed to alloc dentry wq");
		goto out_err;
	}
	node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu",
					   WQ_UNBOUND | WQ_MEM_RECLAIM,
					   HMDFS_RETRY_WB_WQ_MAX_ACTIVE,
					   sbi->seq, node->device_id);
	if (!node->retry_wb_wq) {
		hmdfs_err("Failed to alloc retry writeback wq");
		goto out_err;
	}
	node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu",
					      WQ_UNBOUND, 0,
					      sbi->seq, node->device_id);
	if (!node->reget_conn_wq) {
		hmdfs_err("Failed to alloc reget conn wq");
		goto out_err;
	}
	INIT_LIST_HEAD(&node->conn_impl_list);
	mutex_init(&node->conn_impl_list_lock);
	INIT_LIST_HEAD(&node->conn_deleting_list);
	init_waitqueue_head(&node->deleting_list_wq);
	idr_init(&node->msg_idr);
	spin_lock_init(&node->idr_lock);
	idr_init(&node->file_id_idr);
	spin_lock_init(&node->file_id_lock);
	INIT_LIST_HEAD(&node->list);
	kref_init(&node->ref_cnt);
	node->owner = sbi->seq;
	node->sbi = sbi;
	node->version = HMDFS_VERSION;
	node->status = NODE_STAT_SHAKING;
	node->conn_time = jiffies;
	memcpy(node->cid, cid, HMDFS_CID_SIZE);
	atomic64_set(&node->sb_dirty_count, 0);
	node->fid_cookie = 0;
	atomic_set(&node->evt_seq, 0);
	mutex_init(&node->seq_lock);
	mutex_init(&node->offline_cb_lock);
	mutex_init(&node->evt_lock);
	node->pending_evt = RAW_NODE_EVT_NR;
	node->last_evt = RAW_NODE_EVT_NR;
	node->cur_evt[0] = RAW_NODE_EVT_NR;
	node->cur_evt[1] = RAW_NODE_EVT_NR;
	node->seq_wr_idx = (unsigned char)UINT_MAX;
	node->seq_rd_idx = node->seq_wr_idx;
	INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work);
	node->msg_idr_process = 0;
	node->offline_start = false;
	spin_lock_init(&node->wr_opened_inode_lock);
	INIT_LIST_HEAD(&node->wr_opened_inode_list);
	spin_lock_init(&node->stashed_inode_lock);
	node->stashed_inode_nr = 0;
	atomic_set(&node->rebuild_inode_status_nr, 0);
	init_waitqueue_head(&node->rebuild_inode_status_wq);
	INIT_LIST_HEAD(&node->stashed_inode_list);
	node->need_rebuild_stash_list = false;
	node->devsl = devsl;

	return node;

out_err:
	if (node->async_wq) {
		destroy_workqueue(node->async_wq);
		node->async_wq = NULL;
	}
	if (node->req_handle_wq) {
		destroy_workqueue(node->req_handle_wq);
		node->req_handle_wq = NULL;
	}
	if (node->dentry_wq) {
		destroy_workqueue(node->dentry_wq);
		node->dentry_wq = NULL;
	}
	if (node->retry_wb_wq) {
		destroy_workqueue(node->retry_wb_wq);
		node->retry_wb_wq = NULL;
	}
	if (node->reget_conn_wq) {
		destroy_workqueue(node->reget_conn_wq);
		node->reget_conn_wq = NULL;
	}
	kfree(node);
	return NULL;
}

struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
	uint32_t devsl)
{
	struct hmdfs_peer *peer = NULL, *on_sbi_peer = NULL;

	mutex_lock(&sbi->connections.node_lock);
	peer = lookup_peer_by_cid_unsafe(sbi, cid);
	mutex_unlock(&sbi->connections.node_lock);
	if (peer) {
		hmdfs_info("Got an existing peer: device_id = %llu",
			   peer->device_id);
		goto out;
	}

	peer = alloc_peer(sbi, cid, devsl);
	if (unlikely(!peer)) {
		hmdfs_info("Failed to alloc a peer");
		goto out;
	}

	mutex_lock(&sbi->connections.node_lock);
	on_sbi_peer = add_peer_unsafe(sbi, peer);
	mutex_unlock(&sbi->connections.node_lock);
	if (IS_ERR(on_sbi_peer)) {
		peer_put(peer);
		peer = NULL;
		goto out;
	} else if (unlikely(on_sbi_peer != peer)) {
		hmdfs_info("Got an existing peer: device_id = %llu",
			   on_sbi_peer->device_id);
		peer_put(peer);
		peer = on_sbi_peer;
	} else {
		hmdfs_info("Got a newly allocated peer: device_id = %llu",
			   peer->device_id);
	}

out:
	return peer;
}

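/*
 * hmdfs_get_peer() resolves the lookup/insert race by allocating
 * outside node_lock and letting add_peer_unsafe() redo the lookup under
 * the lock; the loser drops its freshly allocated peer and adopts the
 * winner.
 */
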
static void head_release(struct kref *kref)
{
	struct hmdfs_msg_idr_head *head;
	struct hmdfs_peer *con;

	head = (struct hmdfs_msg_idr_head *)container_of(kref,
			struct hmdfs_msg_idr_head, ref);
	con = head->peer;
	idr_remove(&con->msg_idr, head->msg_id);
	spin_unlock(&con->idr_lock);

	kfree(head);
}

void head_put(struct hmdfs_msg_idr_head *head)
{
	kref_put_lock(&head->ref, head_release, &head->peer->idr_lock);
}

struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer, int id)
{
	struct hmdfs_msg_idr_head *head = NULL;

	spin_lock(&peer->idr_lock);
	head = idr_find(&peer->msg_idr, id);
	if (head)
		kref_get(&head->ref);
	spin_unlock(&peer->idr_lock);

	return head;
}

int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
			void *ptr)
{
	int ret = -EAGAIN;
	struct hmdfs_msg_idr_head *head = ptr;

	idr_preload(GFP_KERNEL);
	spin_lock(&peer->idr_lock);
	if (!peer->offline_start)
		ret = idr_alloc_cyclic(&peer->msg_idr, ptr,
				       1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		kref_init(&head->ref);
		head->msg_id = ret;
		head->type = type;
		head->peer = peer;
		peer->msg_idr_process++;
		ret = 0;
	}
	spin_unlock(&peer->idr_lock);
	idr_preload_end();

	return ret;
}
1281