/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/hmdfs/comm/connection.h
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#ifndef HMDFS_CONNECTION_H
#define HMDFS_CONNECTION_H

#ifdef CONFIG_HMDFS_FS_ENCRYPTION
#include <linux/tls.h>
#endif

#include <crypto/aead.h>
#include <net/sock.h>
#include "protocol.h"
#include "node_cb.h"

#define HMDFS_KEY_SIZE 32
#define HMDFS_IV_SIZE 12
#define HMDFS_TAG_SIZE 16
#define HMDFS_CID_SIZE 64

enum {
	CONNECT_MESG_HANDSHAKE_REQUEST = 1,
	CONNECT_MESG_HANDSHAKE_RESPONSE = 2,
	CONNECT_MESG_HANDSHAKE_ACK = 3,
};

enum {
	CONNECT_STAT_WAIT_REQUEST = 0,
	CONNECT_STAT_WAIT_RESPONSE,
	CONNECT_STAT_WORKING,
	CONNECT_STAT_STOP,
	CONNECT_STAT_WAIT_ACK,
	CONNECT_STAT_NEGO_FAIL,
	CONNECT_STAT_COUNT
};
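
/*
 * Rough sketch of how the message types and connection states above are
 * expected to line up during the three-way handshake. This reading is
 * inferred from the names alone; the authoritative transitions live in the
 * connection implementation:
 *
 *	initiator: sends HANDSHAKE_REQUEST, waits in WAIT_RESPONSE
 *	acceptor:  waits in WAIT_REQUEST, replies with HANDSHAKE_RESPONSE,
 *	           then waits in WAIT_ACK
 *	initiator: replies with HANDSHAKE_ACK and moves to WORKING; the
 *	           acceptor moves to WORKING on receiving the ACK
 *
 * A failed negotiation presumably ends in NEGO_FAIL, and a torn-down
 * connection in STOP.
 */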

enum {
	CONNECT_TYPE_TCP = 0,
	CONNECT_TYPE_UNSUPPORT,
};

struct connection_stat {
	int64_t send_bytes;
	int64_t recv_bytes;
	int send_message_count;
	int recv_message_count;
	unsigned long rekey_time;
};

struct connection {
	struct list_head list;
	struct kref ref_cnt;
	struct mutex ref_lock;
	struct hmdfs_peer *node;
	int type;
	int status;
	void *connect_handle;
	struct crypto_aead *tfm;
	u8 master_key[HMDFS_KEY_SIZE];
	u8 send_key[HMDFS_KEY_SIZE];
	u8 recv_key[HMDFS_KEY_SIZE];
	struct connection_stat stat;
	struct work_struct reget_work;
#ifdef CONFIG_HMDFS_FS_ENCRYPTION
	struct tls12_crypto_info_aes_gcm_128 send_crypto_info;
	struct tls12_crypto_info_aes_gcm_128 recv_crypto_info;
#endif
	void (*close)(struct connection *connect);
	int (*send_message)(struct connection *connect,
			    struct hmdfs_send_data *msg);
	uint32_t crypto;
};
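
/*
 * Illustrative usage sketch, not part of the original interface: callers
 * are expected to pin a connection with its kref before invoking the
 * callbacks above (the status check and error handling are assumptions):
 *
 *	connection_get(conn);
 *	if (conn->status == CONNECT_STAT_WORKING)
 *		err = conn->send_message(conn, msg);
 *	connection_put(conn);
 *
 * connection_get()/connection_put() are declared later in this header.
 */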

enum {
	NODE_STAT_SHAKING = 0,
	NODE_STAT_ONLINE,
	NODE_STAT_OFFLINE,
};

struct hmdfs_async_work {
	struct hmdfs_msg_idr_head head;
	struct page *page;
	struct delayed_work d_work;
	unsigned long start;
};

enum {
	RAW_NODE_EVT_OFF = 0,
	RAW_NODE_EVT_ON,
	RAW_NODE_EVT_NR,
};

#define RAW_NODE_EVT_MAX_NR 4

struct hmdfs_stash_statistics {
	unsigned int cur_ok;
	unsigned int cur_nothing;
	unsigned int cur_fail;
	unsigned int total_ok;
	unsigned int total_nothing;
	unsigned int total_fail;
	unsigned long long ok_pages;
	unsigned long long fail_pages;
};

struct hmdfs_restore_statistics {
	unsigned int cur_ok;
	unsigned int cur_fail;
	unsigned int cur_keep;
	unsigned int total_ok;
	unsigned int total_fail;
	unsigned int total_keep;
	unsigned long long ok_pages;
	unsigned long long fail_pages;
};

struct hmdfs_rebuild_statistics {
	unsigned int cur_ok;
	unsigned int cur_fail;
	unsigned int cur_invalid;
	unsigned int total_ok;
	unsigned int total_fail;
	unsigned int total_invalid;
	unsigned int time;
};

struct hmdfs_peer_statistics {
	/* stash statistics */
	struct hmdfs_stash_statistics stash;
	/* restore statistics */
	struct hmdfs_restore_statistics restore;
	/* rebuild statistics */
	struct hmdfs_rebuild_statistics rebuild;
};

struct hmdfs_peer {
	struct list_head list;
	struct kref ref_cnt;
	unsigned int owner;
	uint64_t device_id;
	unsigned long conn_time;
	uint8_t version;
	int status;
	u64 features;
	long long old_sb_dirty_count;
	atomic64_t sb_dirty_count;
	/*
	 * Cookie for opened file ids; it is increased each time the peer
	 * goes offline.
	 */
	uint16_t fid_cookie;
	struct mutex conn_impl_list_lock;
	struct list_head conn_impl_list;
	/*
	 * When an async message processing context calls
	 * hmdfs_reget_connection(), the connection node is added to
	 * conn_deleting_list, so that hmdfs_disconnect_node() can wait for
	 * all receive threads to exit.
	 */
	struct list_head conn_deleting_list;
	wait_queue_head_t deleting_list_wq;
	struct idr msg_idr;
	spinlock_t idr_lock;
	struct idr file_id_idr;
	spinlock_t file_id_lock;
	int recvbuf_maxsize;
	struct crypto_aead *tfm;
	char cid[HMDFS_CID_SIZE + 1];
	const struct connection_operations *conn_operations;
	struct hmdfs_sb_info *sbi;
	struct workqueue_struct *async_wq;
	struct workqueue_struct *req_handle_wq;
	struct workqueue_struct *dentry_wq;
	struct workqueue_struct *retry_wb_wq;
	struct workqueue_struct *reget_conn_wq;
	atomic_t evt_seq;
	/* sync cb may be blocking */
	struct mutex seq_lock;
	struct mutex offline_cb_lock;
	struct mutex evt_lock;
	unsigned char pending_evt;
	unsigned char last_evt;
	unsigned char waiting_evt[RAW_NODE_EVT_NR];
	unsigned char seq_rd_idx;
	unsigned char seq_wr_idx;
	unsigned int seq_tbl[RAW_NODE_EVT_MAX_NR];
	unsigned int pending_evt_seq;
	unsigned char cur_evt[NODE_EVT_TYPE_NR];
	unsigned int cur_evt_seq[NODE_EVT_TYPE_NR];
	unsigned int merged_evt;
	unsigned int dup_evt[RAW_NODE_EVT_NR];
	struct delayed_work evt_dwork;
	/* protected by idr_lock */
	uint64_t msg_idr_process;
	bool offline_start;
	spinlock_t wr_opened_inode_lock;
	struct list_head wr_opened_inode_list;
	/*
	 * protect @stashed_inode_list and @stashed_inode_nr in stash process
	 * and fill_inode_remote->hmdfs_remote_init_stash_status process
	 */
	spinlock_t stashed_inode_lock;
	unsigned int stashed_inode_nr;
	struct list_head stashed_inode_list;
	bool need_rebuild_stash_list;
	/* how many inodes are rebuilding their stash status */
	atomic_t rebuild_inode_status_nr;
	wait_queue_head_t rebuild_inode_status_wq;
	struct hmdfs_peer_statistics stats;
	/* sysfs */
	struct kobject kobj;
	struct completion kobj_unregister;
	uint32_t devsl;
};

#define HMDFS_DEVID_LOCAL 0

/* To stay compatible with DFS 1.0, don't add the packed attribute for now */
struct connection_msg_head {
	__u8 magic;
	__u8 version;
	__u8 operations;
	__u8 flags;
	__le32 datasize;
	__le64 source;
	__le16 msg_id;
	__le16 request_id;
	__le32 reserved1;
} __packed;
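
/*
 * With __packed the wire header above occupies exactly 24 bytes
 * (4 x __u8 + __le32 + __le64 + 2 x __le16 + __le32). A build-time check
 * such as the following could guard that layout (sketch only; not present
 * in the original sources):
 *
 *	static_assert(sizeof(struct connection_msg_head) == 24);
 */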

struct connection_handshake_req {
	__le32 len;
	char dev_id[];
} __packed;
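
/*
 * Illustrative sketch only: the handshake request carries a
 * variable-length device id after the fixed part, so a sender would size
 * and fill it roughly like this (allocation style and variable names are
 * assumptions, not taken from the original sources):
 *
 *	size_t msg_len = sizeof(struct connection_handshake_req) + cid_len;
 *	struct connection_handshake_req *req = kzalloc(msg_len, GFP_KERNEL);
 *
 *	if (req) {
 *		req->len = cpu_to_le32(cid_len);
 *		memcpy(req->dev_id, cid, cid_len);
 *	}
 */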

enum {
	HS_EXTEND_CODE_CRYPTO = 0,
	HS_EXTEND_CODE_CASE_SENSE,
	HS_EXTEND_CODE_FEATURE_SUPPORT,
	HS_EXTEND_CODE_COUNT
};

struct conn_hs_extend_reg {
	__u16 len;
	__u16 resv;
	void (*filler)(struct connection *conn_impl, __u8 ops,
		       void *data, __u32 len);
	int (*parser)(struct connection *conn_impl, __u8 ops,
		      void *data, __u32 len);
};

struct conn_hs_extend_head {
	__le32 field_cn;
	char data[];
};

struct extend_field_head {
	__le16 code;
	__le16 len;
} __packed;

struct crypto_body {
	__le32 crypto;
} __packed;

struct case_sense_body {
	__u8 case_sensitive;
} __packed;

struct feature_body {
	__u64 features;
	__u64 reserved;
} __packed;
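
/*
 * Illustrative sketch only (the real parser lives in the connection code;
 * the walk below is an assumption based on the layout above): the extend
 * area starts with a field count, followed by @field_cn entries, each an
 * extend_field_head plus @len bytes of payload selected by @code:
 *
 *	struct conn_hs_extend_head *eh = data;
 *	char *p = eh->data;
 *	__u32 i;
 *
 *	for (i = 0; i < le32_to_cpu(eh->field_cn); i++) {
 *		struct extend_field_head *fh = (struct extend_field_head *)p;
 *		__u16 len = le16_to_cpu(fh->len);
 *
 *		handle_field(le16_to_cpu(fh->code), p + sizeof(*fh), len);
 *		p += sizeof(*fh) + len;
 *	}
 *
 * handle_field() is a hypothetical dispatcher for the HS_EXTEND_CODE_*
 * payloads (crypto_body, case_sense_body, feature_body).
 */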

#define HMDFS_HS_CRYPTO_KTLS_AES128 0x00000001
#define HMDFS_HS_CRYPTO_KTLS_AES256 0x00000002
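
/*
 * Illustrative helper, not part of the original interface: it treats
 * @crypto in struct connection as carrying the HMDFS_HS_CRYPTO_KTLS_*
 * value negotiated during the handshake (an assumption based on the field
 * and flag names).
 */
static inline bool hmdfs_conn_crypto_is_ktls(const struct connection *conn)
{
	return conn->crypto & (HMDFS_HS_CRYPTO_KTLS_AES128 |
			       HMDFS_HS_CRYPTO_KTLS_AES256);
}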

static inline bool hmdfs_is_node_online(const struct hmdfs_peer *node)
{
	return READ_ONCE(node->status) == NODE_STAT_ONLINE;
}

static inline unsigned int hmdfs_node_inc_evt_seq(struct hmdfs_peer *node)
{
	/* Use the atomic as an unsigned integer */
	return atomic_inc_return(&node->evt_seq);
}

static inline unsigned int hmdfs_node_evt_seq(const struct hmdfs_peer *node)
{
	return atomic_read(&node->evt_seq);
}

struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type);

void set_conn_sock_quickack(struct hmdfs_peer *node);

struct hmdfs_peer *hmdfs_get_peer(struct hmdfs_sb_info *sbi, uint8_t *cid,
				  uint32_t devsl);

struct hmdfs_peer *hmdfs_lookup_from_devid(struct hmdfs_sb_info *sbi,
					   uint64_t device_id);
struct hmdfs_peer *hmdfs_lookup_from_cid(struct hmdfs_sb_info *sbi,
					 uint8_t *cid);
void connection_send_handshake(struct connection *conn_impl, __u8 operations,
			       __le16 request_id);
void connection_handshake_recv_handler(struct connection *conn_impl, void *buf,
				       void *data, __u32 data_len);
void connection_working_recv_handler(struct connection *conn_impl, void *head,
				     void *data, __u32 data_len);
static inline void connection_get(struct connection *conn)
{
	kref_get(&conn->ref_cnt);
}

void connection_put(struct connection *conn);
static inline void peer_get(struct hmdfs_peer *peer)
{
	kref_get(&peer->ref_cnt);
}

void peer_put(struct hmdfs_peer *peer);
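
/*
 * Illustrative usage sketch (the reference pattern is an assumption drawn
 * from the kref fields and the get/put pairs above): a lookup that returns
 * a peer is expected to be balanced with peer_put() once the caller is
 * done with it:
 *
 *	struct hmdfs_peer *peer = hmdfs_lookup_from_devid(sbi, device_id);
 *
 *	if (peer) {
 *		... use peer ...
 *		peer_put(peer);
 *	}
 */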

int hmdfs_sendmessage(struct hmdfs_peer *node, struct hmdfs_send_data *msg);
void hmdfs_connections_stop(struct hmdfs_sb_info *sbi);

void hmdfs_disconnect_node(struct hmdfs_peer *node);

void connection_to_working(struct hmdfs_peer *node);

int hmdfs_alloc_msg_idr(struct hmdfs_peer *peer, enum MSG_IDR_TYPE type,
			void *ptr);
struct hmdfs_msg_idr_head *hmdfs_find_msg_head(struct hmdfs_peer *peer, int id);

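/*
 * The two helpers below presumably bracket offline processing for a peer;
 * @offline_start can then be sampled under @idr_lock by code that needs to
 * know whether an offline sequence is currently in flight. This reading is
 * inferred from the field and function names.
 */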
static inline void hmdfs_start_process_offline(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->offline_start = true;
	spin_unlock(&peer->idr_lock);
}

static inline void hmdfs_stop_process_offline(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->offline_start = false;
	spin_unlock(&peer->idr_lock);
}

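/*
 * Drops the per-peer count of messages currently being processed through
 * the message idr; as noted on @msg_idr_process above, the counter is
 * protected by @idr_lock. (The pairing with hmdfs_alloc_msg_idr() is an
 * assumption based on the declarations in this header.)
 */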
static inline void hmdfs_dec_msg_idr_process(struct hmdfs_peer *peer)
{
	spin_lock(&peer->idr_lock);
	peer->msg_idr_process--;
	spin_unlock(&peer->idr_lock);
}
#endif