/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/hmdfs/comm/connection.h
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#ifndef HMDFS_CONNECTION_H
#define HMDFS_CONNECTION_H

#ifdef CONFIG_HMDFS_FS_ENCRYPTION
#include <linux/tls.h>
#endif

#include <crypto/aead.h>
#include <net/sock.h>
#include "protocol.h"
#include "node_cb.h"

#define HMDFS_KEY_SIZE 32
#define HMDFS_IV_SIZE 12
#define HMDFS_TAG_SIZE 16
#define HMDFS_CID_SIZE 64
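
/*
 * Note: these sizes are consistent with AES-GCM as used by kTLS
 * (a 32-byte AES-256 key, a 12-byte IV and a 16-byte authentication tag);
 * the actual cipher is negotiated via the HMDFS_HS_CRYPTO_KTLS_* handshake
 * flags defined further below.
 */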

enum {
	CONNECT_MESG_HANDSHAKE_REQUEST = 1,
	CONNECT_MESG_HANDSHAKE_RESPONSE = 2,
	CONNECT_MESG_HANDSHAKE_ACK = 3,
};

enum {
	CONNECT_STAT_WAIT_REQUEST = 0,
	CONNECT_STAT_WAIT_RESPONSE,
	CONNECT_STAT_WORKING,
	CONNECT_STAT_STOP,
	CONNECT_STAT_WAIT_ACK,
	CONNECT_STAT_NEGO_FAIL,
	CONNECT_STAT_COUNT
};
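
/*
 * Connection negotiation appears to follow a three-message handshake
 * (REQUEST -> RESPONSE -> ACK, driven by connection_send_handshake()
 * declared below); the states above track which handshake message a side
 * is currently waiting for before the connection reaches
 * CONNECT_STAT_WORKING, or CONNECT_STAT_NEGO_FAIL if negotiation fails.
 */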

enum {
	CONNECT_TYPE_TCP = 0,
	CONNECT_TYPE_UNSUPPORT,
};

struct connection_stat {
	int64_t send_bytes;
	int64_t recv_bytes;
	int send_message_count;
	int recv_message_count;
	unsigned long rekey_time;
};

struct connection {
	struct list_head list;
	struct kref ref_cnt;
	struct mutex ref_lock;
	struct hmdfs_peer *node;
	int type;
	int status;
	void *connect_handle;
	struct crypto_aead *tfm;
	u8 master_key[HMDFS_KEY_SIZE];
	u8 send_key[HMDFS_KEY_SIZE];
	u8 recv_key[HMDFS_KEY_SIZE];
	struct connection_stat stat;
	struct work_struct reget_work;
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
	struct tls12_crypto_info_aes_gcm_128 send_crypto_info;
	struct tls12_crypto_info_aes_gcm_128 recv_crypto_info;
#endif
	void (*close)(struct connection *connect);
	int (*send_message)(struct connection *connect,
			    struct hmdfs_send_data *msg);
	uint32_t crypto;
};

enum {
	NODE_STAT_SHAKING = 0,
	NODE_STAT_ONLINE,
	NODE_STAT_OFFLINE,
};

struct hmdfs_async_work {
	struct hmdfs_msg_idr_head head;
	struct page *page;
	struct delayed_work d_work;
	unsigned long start;
};

enum {
	RAW_NODE_EVT_OFF = 0,
	RAW_NODE_EVT_ON,
	RAW_NODE_EVT_NR,
};

#define RAW_NODE_EVT_MAX_NR 4
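
/*
 * RAW_NODE_EVT_MAX_NR presumably bounds the small ring buffer of pending
 * node-event sequence numbers (@seq_tbl, indexed by @seq_rd_idx and
 * @seq_wr_idx in struct hmdfs_peer below); this is inferred from the field
 * names rather than stated in this header.
 */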

struct hmdfs_stash_statistics {
	unsigned int cur_ok;
	unsigned int cur_nothing;
	unsigned int cur_fail;
	unsigned int total_ok;
	unsigned int total_nothing;
	unsigned int total_fail;
	unsigned long long ok_pages;
	unsigned long long fail_pages;
};

struct hmdfs_restore_statistics {
	unsigned int cur_ok;
	unsigned int cur_fail;
	unsigned int cur_keep;
	unsigned int total_ok;
	unsigned int total_fail;
	unsigned int total_keep;
	unsigned long long ok_pages;
	unsigned long long fail_pages;
};

struct hmdfs_rebuild_statistics {
	unsigned int cur_ok;
	unsigned int cur_fail;
	unsigned int cur_invalid;
	unsigned int total_ok;
	unsigned int total_fail;
	unsigned int total_invalid;
	unsigned int time;
};

struct hmdfs_peer_statistics {
	/* stash statistics */
	struct hmdfs_stash_statistics stash;
	/* restore statistics */
	struct hmdfs_restore_statistics restore;
	/* rebuild statistics */
	struct hmdfs_rebuild_statistics rebuild;
};

struct hmdfs_peer {
	struct list_head list;
	struct kref ref_cnt;
	unsigned int owner;
	uint64_t device_id;
	unsigned long conn_time;
	uint8_t version;
	int status;
	u64 features;
	long long old_sb_dirty_count;
	atomic64_t sb_dirty_count;
	/*
	 * Cookie for opened file IDs.
	 * It is increased after the peer has gone offline.
	 */
	uint16_t fid_cookie;
	struct mutex conn_impl_list_lock;
	struct list_head conn_impl_list;
	/*
	 * When an async message processing context calls
	 * hmdfs_reget_connection(), the connection node is added to
	 * conn_deleting_list so that hmdfs_disconnect_node() can wait
	 * for all receive threads to exit.
	 */
	struct list_head conn_deleting_list;
	wait_queue_head_t deleting_list_wq;
	struct idr msg_idr;
	spinlock_t idr_lock;
	struct idr file_id_idr;
	spinlock_t file_id_lock;
	int recvbuf_maxsize;
	struct crypto_aead *tfm;
	char cid[HMDFS_CID_SIZE + 1];
	struct hmdfs_sb_info *sbi;
	struct workqueue_struct *async_wq;
	struct workqueue_struct *req_handle_wq;
	struct workqueue_struct *dentry_wq;
	struct workqueue_struct *retry_wb_wq;
	struct workqueue_struct *reget_conn_wq;
	atomic_t evt_seq;
	/* sync cb may be blocking */
	struct mutex seq_lock;
	struct mutex offline_cb_lock;
	struct mutex evt_lock;
	unsigned char pending_evt;
	unsigned char last_evt;
	unsigned char waiting_evt[RAW_NODE_EVT_NR];
	unsigned char seq_rd_idx;
	unsigned char seq_wr_idx;
	unsigned int seq_tbl[RAW_NODE_EVT_MAX_NR];
	unsigned int pending_evt_seq;
	unsigned char cur_evt[NODE_EVT_TYPE_NR];
	unsigned int cur_evt_seq[NODE_EVT_TYPE_NR];
	unsigned int merged_evt;
	unsigned int dup_evt[RAW_NODE_EVT_NR];
	struct delayed_work evt_dwork;
	/* protected by idr_lock */
	uint64_t msg_idr_process;
	bool offline_start;
	spinlock_t wr_opened_inode_lock;
	struct list_head wr_opened_inode_list;
	/*
	 * Protects @stashed_inode_list and @stashed_inode_nr in the stash
	 * process and in the fill_inode_remote->hmdfs_remote_init_stash_status
	 * path.
	 */
	spinlock_t stashed_inode_lock;
	unsigned int stashed_inode_nr;
	struct list_head stashed_inode_list;
	bool need_rebuild_stash_list;
	/* how many inodes are rebuilding stash status */
	atomic_t rebuild_inode_status_nr;
	wait_queue_head_t rebuild_inode_status_wq;
	struct hmdfs_peer_statistics stats;
	/* sysfs */
	struct kobject kobj;
	struct completion kobj_unregister;
	uint32_t devsl;
};

#define HMDFS_DEVID_LOCAL 0

/* Stay compatible with DFS 1.0; don't add the packed attribute for now */
struct connection_msg_head {
	__u8 magic;
	__u8 version;
	__u8 operations;
	__u8 flags;
	__le32 datasize;
	__le64 source;
	__le16 msg_id;
	__le16 request_id;
	__le32 reserved1;
} __packed;

struct connection_handshake_req {
	__le32 len;
	char dev_id[0];
} __packed;

enum {
	HS_EXTEND_CODE_CRYPTO = 0,
	HS_EXTEND_CODE_CASE_SENSE,
	HS_EXTEND_CODE_FEATURE_SUPPORT,
	HS_EXTEND_CODE_COUNT
};

struct conn_hs_extend_reg {
	__u16 len;
	__u16 resv;
	void (*filler)(struct connection *conn_impl, __u8 ops,
		       void *data, __u32 len);
	int (*parser)(struct connection *conn_impl, __u8 ops,
		      void *data, __u32 len);
};

struct conn_hs_extend_head {
	__le32 field_cn;
	char data[0];
};

struct extend_field_head {
	__le16 code;
	__le16 len;
} __packed;

struct crypto_body {
	__le32 crypto;
} __packed;

struct case_sense_body {
	__u8 case_sensitive;
} __packed;

struct feature_body {
	__u64 features;
	__u64 reserved;
} __packed;

#define HMDFS_HS_CRYPTO_KTLS_AES128 0x00000001
#define HMDFS_HS_CRYPTO_KTLS_AES256 0x00000002
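
/*
 * Illustrative sketch only, inferred from the struct definitions above
 * (not a normative wire format): a handshake payload is assumed to carry
 * a connection_handshake_req (len + device id) followed by a
 * conn_hs_extend_head whose @field_cn counts the extend fields; each extend
 * field is an extend_field_head (@code from HS_EXTEND_CODE_*, plus @len)
 * followed by its body, e.g. crypto_body, case_sense_body or feature_body,
 * with the registered filler()/parser() callbacks producing and consuming
 * those bodies.
 */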

static inline bool hmdfs_is_node_online(const struct hmdfs_peer *node)
{
	return READ_ONCE(node->status) == NODE_STAT_ONLINE;
}

static inline unsigned int hmdfs_node_inc_evt_seq(struct hmdfs_peer *node)
{
	/* Use the atomic as an unsigned integer */
	return atomic_inc_return(&node->evt_seq);
}

static inline unsigned int hmdfs_node_evt_seq(const struct hmdfs_peer *node)
{
	return atomic_read(&node->evt_seq);
}

struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type);

void set_conn_sock_quickack(struct hmdfs_peer *node);

struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
				  uint32_t devsl);

struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi,
					   uint64_t device_id);
struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi,
					 uint8_t *cid);
void connection_send_handshake(struct connection *conn_impl, __u8 operations,
			       __le16 request_id);
void connection_handshake_recv_handler(struct connection *conn_impl, void *buf,
				       void *data, __u32 data_len);
void connection_working_recv_handler(struct connection *conn_impl, void *head,
				     void *data, __u32 data_len);
static inline void connection_get(struct connection *conn)
{
	kref_get(&conn->ref_cnt);
}

void connection_put(struct connection *conn);
static inline void peer_get(struct hmdfs_peer *peer)
{
	kref_get(&peer->ref_cnt);
}

void peer_put(struct hmdfs_peer *peer);
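
/*
 * A minimal usage sketch of the reference-counting helpers, assuming the
 * lookup helpers above return objects with a reference already held
 * (illustration only, not taken from the hmdfs callers):
 *
 *	struct hmdfs_peer *peer = hmdfs_lookup_from_devid(sbi, device_id);
 *	struct connection *conn;
 *
 *	if (!peer)
 *		return -ENODEV;
 *	conn = get_conn_impl(peer, CONNECT_TYPE_TCP);
 *	if (conn) {
 *		// ... use conn ...
 *		connection_put(conn);
 *	}
 *	peer_put(peer);
 */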

int hmdfs_sendmessage(struct hmdfs_peer *node, struct hmdfs_send_data *msg);
void hmdfs_connections_stop(struct hmdfs_sb_info *sbi);

void hmdfs_disconnect_node(struct hmdfs_peer *node);

void connection_to_working(struct hmdfs_peer *node);

int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
			void *ptr);
struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer, int id);

static inline void hmdfs_start_process_offline(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->offline_start = true;
	spin_unlock(&peer->idr_lock);
}

static inline void hmdfs_stop_process_offline(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->offline_start = false;
	spin_unlock(&peer->idr_lock);
}

static inline void hmdfs_dec_msg_idr_process(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->msg_idr_process--;
	spin_unlock(&peer->idr_lock);
}
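
/*
 * A plausible reading of the helpers above (not verified against the .c
 * files): hmdfs_alloc_msg_idr() is assumed to bump @msg_idr_process under
 * @idr_lock when it installs a message head, and hmdfs_dec_msg_idr_process()
 * drops it once the message has been handled; @offline_start, toggled by
 * hmdfs_start/stop_process_offline(), marks that offline processing is in
 * progress so in-flight message handling can be drained accordingly.
 */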
#endif