/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

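/*
 * Completion event dispatch, called from the EQ interrupt handler:
 * look up the CQ (the CQN is masked to the table size), bump its arm
 * sequence number and invoke the owner's completion callback.
 */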
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}

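/*
 * Asynchronous event dispatch (e.g. CQ error events): look up the CQ
 * and invoke the owner's event callback.
 */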
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}

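/*
 * Wrappers for the firmware commands that pass CQ ownership from
 * software to hardware (SW2HW_CQ) and back (HW2SW_CQ), and that
 * modify a CQ while hardware owns it (MODIFY_CQ).
 */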
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

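/*
 * Adjust the event moderation parameters of an existing CQ via
 * MODIFY_CQ (opcode modifier 1): a completion event is generated
 * after "count" completions or when the "period" moderation timer
 * expires.
 */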
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

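/*
 * Point an existing CQ at a new buffer via MODIFY_CQ (opcode
 * modifier 0): the new log2 size and the new MTT address are written
 * into the CQ context.
 */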
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

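/*
 * Reserve a CQ number from the bitmap and take references on the ICM
 * chunks backing its entry in the CQ context table and in the cMPT
 * table, unwinding both steps on failure.
 */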
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
	if (err)
		goto err_put;

	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

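/*
 * Allocate a CQ number: on a multi-function device the request is
 * forwarded to the resource tracker (ALLOC_RES), otherwise
 * __mlx4_cq_alloc_icm() is called directly.
 */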
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

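/*
 * Counterpart of __mlx4_cq_alloc_icm(): drop the ICM references and
 * return the CQ number to the bitmap.
 */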
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

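/*
 * Free a CQ number, forwarding the request (FREE_RES) on a
 * multi-function device.
 */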
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else
		__mlx4_cq_free_icm(dev, cqn);
}

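/*
 * Create a CQ.  The CQ number is allocated, the CQ is inserted into
 * the radix tree that the interrupt handlers use for lookups, and the
 * CQ context (log2 size, UAR index, completion EQ, page size, MTT
 * address and doorbell record address) is handed to the firmware with
 * SW2HW_CQ.  On success the CQ starts with arm_sn = 1 and a single
 * reference held.
 */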
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	cq->irq = priv->eq_table.eq[cq->vector].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

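/*
 * Destroy a CQ: unhook it from the radix tree, reclaim ownership from
 * the firmware with HW2SW_CQ, wait for the interrupt handler and all
 * outstanding references to finish, then free the CQ number.
 */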
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

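/*
 * Initialize the CQ table at device start-up.  Slaves set up only the
 * radix tree; the CQ number bitmap is handled on the master.
 */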
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
				dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
}

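/*
 * Tear down the CQ table.  Only the bitmap needs explicit cleanup;
 * the radix tree has no destructor.
 */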
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}