• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #ifndef IRDMA_VERBS_H
4 #define IRDMA_VERBS_H
5 
/* Number of physical page addresses cached inline in irdma_mr::pgaddrmem */
#define IRDMA_MAX_SAVED_PHY_PGADDR	4
/* Delay in ms for the deferred flush work (see irdma_qp::dwork_flush) */
#define IRDMA_FLUSH_DELAY_MS		20

/* Single-entry P_Key table; only the default "full membership" key */
#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF
11 
/* Driver-private per-process user context, wrapping the core ib_ucontext. */
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	/* mmap entry for the doorbell page exposed to user space */
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;	/* user/kernel ABI version for this context */
	bool legacy_mode;
};
23 
/* Protection domain: core ib_pd plus the HW-facing irdma_sc_pd. */
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};
28 
/* Address-vector data resolved for an AH (MAC, GID addresses, net type). */
struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	/* source/destination GIDs stored as sockaddrs; only one union member
	 * is meaningful depending on the address family
	 */
	union {
		struct sockaddr saddr;
		struct sockaddr_in saddr_in;
		struct sockaddr_in6 saddr_in6;
	} sgid_addr, dgid_addr;
	u8 net_type;
};
39 
/* Address handle: core ib_ah plus HW object and resolved address vector. */
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
};
48 
/*
 * One page-buffer-list entry: either a PBLE pool index or a DMA address,
 * never both at once.
 */
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};
55 
/* User CQ memory registration: CQ buffer PBL plus its shadow area. */
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;	/* DMA address of the CQ shadow area */
	bool split;
};
61 
/* User QP memory registration: SQ/RQ PBLs, shadow area, and SQ page. */
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;	/* DMA address of the QP shadow area */
	struct page *sq_page;
};
68 
/*
 * Kernel CQ buffer kept on irdma_cq::resize_list, freed asynchronously
 * via the embedded work item.
 */
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};
76 
/*
 * Physical buffer list tracking for a user registration; carries either
 * QP or CQ registration data depending on the owning object.
 */
struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;	/* PBLE resources were allocated from the pool */
	bool on_list:1;		/* linked on a ucontext registration list */
	u64 user_base;		/* user-space virtual base of the registration */
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;	/* back-pointer to the owning MR */
};
90 
/*
 * Memory region/window. The leading union lets the same structure back
 * either an ib_mr or an ib_mw; @type discriminates.
 */
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;	/* pinned user memory (NULL for kernel MRs) */
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;	/* HW steering tag for this region */
	u64 len;
	/* small registrations cache page addresses here instead of a PBL */
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};
106 
/* Completion queue: core ib_cq plus HW object and kernel/user backing. */
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;	/* true when the CQ buffer lives in user space */
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;		/* kernel-mode CQ buffer */
	struct irdma_dma_mem kmem_shadow;	/* kernel-mode shadow area */
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	/* old buffers pending release after a CQ resize (irdma_cq_buf) */
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	/* SW-generated flush completions (irdma_cmpl_gen entries) */
	struct list_head cmpl_generated;
};
127 
/* One SW-generated completion queued on irdma_cq::cmpl_generated. */
struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};
132 
/* Deferred disconnect work item carrying the QP it applies to. */
struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};
137 
138 struct iw_cm_id;
139 
/* Kernel-mode QP backing memory and work-request ID tracking arrays. */
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;	/* SQ WR tracking */
	u64 *rq_wrid_mem;				/* RQ wr_id array */
};
145 
/*
 * Queue pair: core ib_qp, the HW-facing irdma_sc_qp, and all driver
 * state for connection management, flushing, and teardown.
 */
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;	/* send CQ */
	struct irdma_cq *iwrcq;	/* receive CQ */
	struct irdma_pd *iwpd;
	/* mmap entries for the push WQE and push doorbell pages */
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	/* transport offload info: only one member is active per QP */
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	/* L4 offload info, paired with the union above (TCP/iWARP, UDP/RoCE) */
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;	/* deferred SQ/RQ flush work */
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;	/* HW modify-QP in flight */
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;	/* last asynchronous event seen for this QP */
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	/* single-bit status flags */
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;	/* flush code reported on SQ terminate */
	u16 term_rq_flush_code;	/* flush code reported on RQ terminate */
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;	/* kernel-mode backing (unused for user QPs) */
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;	/* signaled when the QP can be freed */
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};
209 
/* Caching attribute for a user mmap: non-cached vs write-combined I/O. */
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};
214 
/* Per-mapping state: BAR offset plus the irdma_mmap_flag cache attribute. */
struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;	/* enum irdma_mmap_flag value */
};
220 
irdma_fw_major_ver(struct irdma_sc_dev * dev)221 static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
222 {
223 	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
224 }
225 
irdma_fw_minor_ver(struct irdma_sc_dev * dev)226 static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
227 {
228 	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
229 }
230 
/* Address/registration helpers implemented in the verbs .c file */
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
/* SW flush-completion generation (see irdma_cq::cmpl_generated) */
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
239 #endif /* IRDMA_VERBS_H */
240