/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>

#include "cxgb3_offload.h"
#include "iwch_provider.h"
#include "iwch_user.h"
#include "iwch.h"
#include "iwch_cm.h"

#define DRV_VERSION "1.1"

MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);

struct cxgb3_client t3c_client = {
	.name = "iw_cxgb3",
	.add = open_rnic_dev,
	.remove = close_rnic_dev,
	.handlers = t3c_handlers,
	.redirect = iwch_ep_redirect,
	.event_handler = iwch_event_handler
};

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
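
/*
 * idr_for_each() callback: disable doorbell rings on a QP's work queue
 * while the device doorbell FIFO is overloaded.
 */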
65
static int disable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	cxio_disable_wq_db(&qhp->wq);
	return 0;
}

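/*
 * idr_for_each() callback: if @data is non-zero, ring the QP's doorbell to
 * recover work posted while doorbells were disabled, then re-enable
 * doorbell rings on its work queue.
 */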
static int enable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	if (data)
		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
	cxio_enable_wq_db(&qhp->wq);
	return 0;
}

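/*
 * Disable doorbells on every QP of the device.  Called when the LLD
 * reports that the doorbell FIFO is full or has dropped writes.
 */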
static void disable_dbs(struct iwch_dev *rnicp)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&rnicp->lock);
}

static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, enable_qp_db,
		     (void *)(unsigned long)ring_db);
	spin_unlock_irq(&rnicp->lock);
}

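/*
 * Delayed work scheduled by the OFFLOAD_DB_DROP handler: once the
 * randomized back-off expires, re-enable doorbells on all QPs and ring
 * them to recover any dropped doorbell writes.
 */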
static void iwch_db_drop_task(struct work_struct *work)
{
	struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
					      db_drop_task.work);
	enable_dbs(rnicp, 1);
}

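/*
 * Initialize the per-device resource IDRs, lock and doorbell-drop work,
 * and fill in the RNIC attribute limits advertised for the T3 hardware.
 */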
static void rnic_init(struct iwch_dev *rnicp)
{
	PDBG("%s iwch_dev %p\n", __func__, rnicp);
	idr_init(&rnicp->cqidr);
	idr_init(&rnicp->qpidr);
	idr_init(&rnicp->mmidr);
	spin_lock_init(&rnicp->lock);
	INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);

	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
	rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
	rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
	rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
	rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
	rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
	rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
	rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
	rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
	rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
	rnicp->attr.can_resize_wq = 0;
	rnicp->attr.max_rdma_reads_per_qp = 8;
	rnicp->attr.max_rdma_read_resources =
	    rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
	rnicp->attr.max_rdma_read_qp_depth = 8;	/* IRD */
	rnicp->attr.max_rdma_read_depth =
	    rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
	rnicp->attr.rq_overflow_handled = 0;
	rnicp->attr.can_modify_ird = 0;
	rnicp->attr.can_modify_ord = 0;
	rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
	rnicp->attr.stag0_value = 1;
	rnicp->attr.zbva_support = 1;
	rnicp->attr.local_invalidate_fence = 1;
	rnicp->attr.cq_overflow_detection = 1;
	return;
}

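/*
 * cxgb3_client "add" callback: allocate an iwch_dev for the newly probed
 * T3 adapter, open its CXIO rdev, initialize it and register it with the
 * RDMA core.
 */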
static void open_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *rnicp;

	PDBG("%s t3cdev %p\n", __func__, tdev);
	printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
		    DRV_VERSION);
	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
	if (!rnicp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return;
	}
	rnicp->rdev.ulp = rnicp;
	rnicp->rdev.t3cdev_p = tdev;

	mutex_lock(&dev_mutex);

	if (cxio_rdev_open(&rnicp->rdev)) {
		mutex_unlock(&dev_mutex);
		printk(KERN_ERR MOD "Unable to open CXIO rdev\n");
		ib_dealloc_device(&rnicp->ibdev);
		return;
	}

	rnic_init(rnicp);

	list_add_tail(&rnicp->entry, &dev_list);
	mutex_unlock(&dev_mutex);

	if (iwch_register_device(rnicp)) {
		printk(KERN_ERR MOD "Unable to register device\n");
		close_rnic_dev(tdev);
		/*
		 * close_rnic_dev() has freed rnicp, so bail out rather
		 * than reporting the device as initialized.
		 */
		return;
	}
	printk(KERN_INFO MOD "Initialized device %s\n",
	       pci_name(rnicp->rdev.rnic_info.pdev));
	return;
}

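/*
 * cxgb3_client "remove" callback: find the iwch_dev bound to @tdev, mark
 * its rdev fatal, unregister the IB device and release its resources.
 */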
static void close_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *dev, *tmp;
	PDBG("%s t3cdev %p\n", __func__, tdev);
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
		if (dev->rdev.t3cdev_p == tdev) {
			dev->rdev.flags = CXIO_ERROR_FATAL;
			synchronize_net();
			cancel_delayed_work_sync(&dev->db_drop_task);
			list_del(&dev->entry);
			iwch_unregister_device(dev);
			cxio_rdev_close(&dev->rdev);
			idr_destroy(&dev->cqidr);
			idr_destroy(&dev->qpidr);
			idr_destroy(&dev->mmidr);
			ib_dealloc_device(&dev->ibdev);
			break;
		}
	}
	mutex_unlock(&dev_mutex);
}

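/*
 * cxgb3_client event callback: translate offload/port status changes from
 * the LLD into RDMA core events and handle doorbell flow-control
 * notifications.
 */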
static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
{
	struct cxio_rdev *rdev = tdev->ulp;
	struct iwch_dev *rnicp;
	struct ib_event event;
	u32 portnum = port_id + 1;
	int dispatch = 0;

	if (!rdev)
		return;
	rnicp = rdev_to_iwch_dev(rdev);
	switch (evt) {
	case OFFLOAD_STATUS_DOWN: {
		rdev->flags = CXIO_ERROR_FATAL;
		synchronize_net();
		event.event = IB_EVENT_DEVICE_FATAL;
		dispatch = 1;
		break;
	}
	case OFFLOAD_PORT_DOWN: {
		event.event = IB_EVENT_PORT_ERR;
		dispatch = 1;
		break;
	}
	case OFFLOAD_PORT_UP: {
		event.event = IB_EVENT_PORT_ACTIVE;
		dispatch = 1;
		break;
	}
	case OFFLOAD_DB_FULL: {
		disable_dbs(rnicp);
		break;
	}
	case OFFLOAD_DB_EMPTY: {
		enable_dbs(rnicp, 1);
		break;
	}
	case OFFLOAD_DB_DROP: {
		unsigned long delay = 1000;
		unsigned short r;

		disable_dbs(rnicp);
		get_random_bytes(&r, 2);
		delay += r & 1023;

		/*
		 * delay is between 1000-2023 usecs.
		 */
		schedule_delayed_work(&rnicp->db_drop_task,
			usecs_to_jiffies(delay));
		break;
	}
	}

	if (dispatch) {
		event.device = &rnicp->ibdev;
		event.element.port_num = portnum;
		ib_dispatch_event(&event);
	}

	return;
}

static int __init iwch_init_module(void)
{
	int err;

	err = cxio_hal_init();
	if (err)
		return err;
	err = iwch_cm_init();
	if (err) {
		/* undo cxio_hal_init() on the failure path */
		cxio_hal_exit();
		return err;
	}
	cxio_register_ev_cb(iwch_ev_dispatch);
	cxgb3_register_client(&t3c_client);
	return 0;
}

static void __exit iwch_exit_module(void)
{
	cxgb3_unregister_client(&t3c_client);
	cxio_unregister_ev_cb(iwch_ev_dispatch);
	iwch_cm_term();
	cxio_hal_exit();
}

module_init(iwch_init_module);
module_exit(iwch_exit_module);