// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

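/*
 * Compute the size in bytes of the physical address (PAS) list for an
 * SRQ: round the WQ buffer size (plus the first-page offset) up to
 * whole pages, one 64-bit address per page. The page offset is
 * expressed in 64ths of a page (po_quanta = page_size / 64).
 */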
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}

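/*
 * set_wq()/set_srqc() translate the generic mlx5_srq_attr into the two
 * hardware context layouts: the common WQ context (used by RMPs and
 * XRQs) and the legacy SRQ context. The WQ stride is logged relative
 * to a 16-byte unit, hence the "+ 4" on wqe_shift.
 */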
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
	MLX5_SET(wq,   wq, lwm,		  in->lwm);
	MLX5_SET(wq,   wq, pd,		  in->pd);
	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
}

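/*
 * get_wq()/get_srqc() perform the inverse translation, filling a
 * generic mlx5_srq_attr from a queried hardware context.
 */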
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

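/*
 * Look up an SRQ by number and take a reference on it under the xarray
 * lock, so the caller gets a pointer that cannot be freed underneath
 * it. The caller is responsible for dropping the reference.
 */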
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	xa_lock_irq(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock_irq(&table->array);

	return srq;
}

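/*
 * Issue CREATE_SRQ to firmware: build the variable-length command (the
 * fixed input layout followed by the PAS list), execute it, and record
 * the returned SRQ number and owning uid on success.
 */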
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}

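/*
 * Arming sets the limit watermark (lwm): once the number of posted
 * receive WQEs drops below it, the device raises an SRQ limit event.
 * Legacy SRQs are armed through the generic ARM_RQ command.
 */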
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

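/*
 * Query the SRQ context from firmware and translate it back into a
 * generic mlx5_srq_attr; flag the result if the queue has left the
 * GOOD state.
 */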
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

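/*
 * The XRC SRQ variants below mirror the plain SRQ commands but use the
 * XRC SRQ context entry and their own opcodes.
 */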
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {};
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;
out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

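/*
 * On ISSI 1 devices a plain SRQ is emulated with an RMP (receive
 * memory pool), which is described by the common WQ context rather
 * than the SRQ context.
 */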
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc	= MLX5_ADDR_OF(modify_rmp_in, in,   ctx);
	bitmask	= MLX5_ADDR_OF(modify_rmp_in, in,   bitmask);
	wq	= MLX5_ADDR_OF(rmpc,	      rmpc, wq);

	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,	 rmpn,	    srq->srqn);
	MLX5_SET(modify_rmp_in, in,	 uid,	    srq->uid);
	MLX5_SET(wq,		wq,	 lwm,	    lwm);
	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
	MLX5_SET(rmpc,		rmpc,	 state,	    MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,	 opcode,    MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}

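/*
 * XRQs back tag-matching (TM) SRQs. For IB_SRQT_TM the creation
 * command additionally selects the tag-matching topology, the optional
 * rendezvous offload and the matching-list size.
 */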
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	int pas_size;
	int inlen;
	int err;

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

	set_wq(wq, in);
	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

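/*
 * The *_split() helpers pick the right command set for the device: on
 * ISSI 0 the legacy SRQ commands are used directly, otherwise the
 * resource type chosen at creation time (XRC SRQ, XRQ or RMP) decides.
 */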
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

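/*
 * Create an SRQ of the requested type, initialize its refcount and
 * completion, and publish it in the srq_table xarray so events and
 * lookups can find it. If the xarray store fails, the hardware object
 * is torn down again.
 */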
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	refcount_set(&srq->common.refcount, 1);
	init_completion(&srq->common.free);

	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}

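/*
 * Destroy an SRQ. The entry is first swapped for XA_ZERO_ENTRY so that
 * new lookups fail while the index stays reserved; if the firmware
 * command fails, the entry is restored. Destruction then waits for all
 * outstanding references (e.g. from event delivery) to be dropped.
 */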
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	/* Delete entry, but leave index occupied */
	tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
	if (WARN_ON(tmp != srq))
		return xa_err(tmp) ?: -EINVAL;

	err = destroy_srq_split(dev, srq);
	if (err) {
		/*
		 * We don't need to check the returned result for an error,
		 * because we are storing the entry in pre-allocated xarray
		 * space and it can't fail at this stage.
		 */
		xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
		return err;
	}
	xa_erase_irq(&table->array, srq->srqn);

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);
	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}

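/*
 * Event handler for SRQ catastrophic-error and limit events: extract
 * the 24-bit SRQ number from the EQE, look the SRQ up under a
 * reference, and forward the event to its registered callback.
 */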
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	mlx5_core_res_put(&srq->common);

	return NOTIFY_OK;
}

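/*
 * Table init/cleanup. The xarray is created with interrupt-safe
 * locking since it is also consulted from the SRQ event notifier,
 * which is registered here and unregistered on cleanup.
 */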
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}