/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

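/* Tasklet handler: drain the per-EQ list of CQs with pending completions
 * and invoke each CQ's completion callback outside EQ interrupt context.
 * If processing runs longer than TASKLET_MAX_TIME_JIFFIES, stop and
 * reschedule the tasklet to handle the remaining CQs.
 */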
void mlx5_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
	struct mlx5_core_cq *mcq;
	struct mlx5_core_cq *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
				 tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

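/* Default completion handler: queue the CQ on its EQ's tasklet list and
 * take a reference so the CQ cannot be freed while it is waiting to be
 * served by mlx5_cq_tasklet_cb().
 */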
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
{
	unsigned long flags;
	struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* If migrating CQs between EQs is ever implemented, note that this
	 * point needs to be synchronized: while a CQ is being migrated,
	 * completions may still arrive on the old EQ.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

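/* Completion event handler: look up the CQ by number, bump its arm
 * sequence number and invoke its completion callback. The reference taken
 * under the table lock keeps the CQ alive across the callback.
 */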
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

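/* Asynchronous event handler: look up the CQ by number and forward the
 * event type to the CQ's event callback.
 */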
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);

	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

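/* Create a CQ with the CREATE_CQ firmware command, bind its tasklet context
 * to the EQ named in the CQ context, initialize reference counting, and
 * insert the CQ into the radix tree used for CQN lookups. If the radix tree
 * insertion fails, the CQ is torn down again with DESTROY_CQ.
 */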
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
			   c_eqn);
	struct mlx5_eq *eq;
	int err;

	eq = mlx5_eqn2eq(dev, eqn);
	if (IS_ERR(eq))
		return PTR_ERR(eq);

	memset(out, 0, sizeof(out));
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
	cq->cons_index = 0;
	cq->arm_sn     = 0;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	if (!cq->comp)
		cq->comp = mlx5_add_cq_to_tasklet;
	/* assuming CQ will be deleted before the EQ */
	cq->tasklet_ctx.priv = &eq->tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	cq->pid = current->pid;
	err = mlx5_debug_cq_add(dev, cq);
	if (err)
		mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
			      cq->cqn);

	return 0;

err_cmd:
	memset(din, 0, sizeof(din));
	memset(dout, 0, sizeof(dout));
	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

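/* Remove a CQ from the radix tree, issue the DESTROY_CQ firmware command,
 * then drop the initial reference and wait until all outstanding references
 * (completion/event handlers, tasklet entries) have been released before
 * returning.
 */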
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	struct mlx5_core_cq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	synchronize_irq(cq->irqn);

	mlx5_debug_cq_remove(dev, cq);
	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

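/* Query the CQ context from firmware with the QUERY_CQ command; the result
 * is written to the caller-supplied output buffer.
 */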
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
	MLX5_SET(query_cq_in, in, cqn, cq->cqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

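/* Issue the MODIFY_CQ firmware command using a caller-prepared input
 * mailbox; the caller fills in the CQN, the CQ context fields to change
 * and the modify field select mask.
 */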
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

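/* Convenience wrapper around mlx5_core_modify_cq() that updates only the
 * interrupt moderation parameters cq_period and cq_max_count.
 */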
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);

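/* Initialize the per-device CQ table (lock and radix tree) and register
 * the CQ debugfs entries.
 */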
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	int err;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	err = mlx5_cq_debugfs_init(dev);

	return err;
}

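/* Counterpart of mlx5_init_cq_table(): remove the CQ debugfs entries. */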
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}