/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <rdma/ib_verbs.h>

#include "cxgb3_offload.h"
#include "iwch_provider.h"
#include <rdma/cxgb3-abi.h>
#include "iwch.h"
#include "iwch_cm.h"

#define DRV_VERSION "1.1"

MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);

struct cxgb3_client t3c_client = {
	.name = "iw_cxgb3",
	.add = open_rnic_dev,
	.remove = close_rnic_dev,
	.handlers = t3c_handlers,
	.redirect = iwch_ep_redirect,
	.event_handler = iwch_event_handler
};

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);

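/* idr_for_each() callback: stop posting to one QP's work queue doorbell. */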
static int disable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	cxio_disable_wq_db(&qhp->wq);
	return 0;
}

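/*
 * idr_for_each() callback: re-enable one QP's work queue doorbell,
 * ringing it first when the caller passes a non-NULL @data cookie.
 */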
static int enable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;

	if (data)
		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
	cxio_enable_wq_db(&qhp->wq);
	return 0;
}

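/* Disable the work queue doorbells of every QP on this device. */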
static void disable_dbs(struct iwch_dev *rnicp)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&rnicp->lock);
}

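/* Re-enable the doorbells of every QP, optionally ringing each one first. */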
static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
{
	spin_lock_irq(&rnicp->lock);
	idr_for_each(&rnicp->qpidr, enable_qp_db,
		     (void *)(unsigned long)ring_db);
	spin_unlock_irq(&rnicp->lock);
}

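/* Delayed work scheduled on OFFLOAD_DB_DROP: ring and re-enable all doorbells. */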
static void iwch_db_drop_task(struct work_struct *work)
{
	struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
					      db_drop_task.work);
	enable_dbs(rnicp, 1);
}

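/* Initialize the per-device IDRs, lock, delayed work and fixed T3 limits. */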
static void rnic_init(struct iwch_dev *rnicp)
{
	pr_debug("%s iwch_dev %p\n", __func__,  rnicp);
	idr_init(&rnicp->cqidr);
	idr_init(&rnicp->qpidr);
	idr_init(&rnicp->mmidr);
	spin_lock_init(&rnicp->lock);
	INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);

	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
	rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
	rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
	rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
	rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
	rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
	rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
	rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
	rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
	rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
	rnicp->attr.can_resize_wq = 0;
	rnicp->attr.max_rdma_reads_per_qp = 8;
	rnicp->attr.max_rdma_read_resources =
	    rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
	rnicp->attr.max_rdma_read_qp_depth = 8;	/* IRD */
	rnicp->attr.max_rdma_read_depth =
	    rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
	rnicp->attr.rq_overflow_handled = 0;
	rnicp->attr.can_modify_ird = 0;
	rnicp->attr.can_modify_ord = 0;
	rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
	rnicp->attr.stag0_value = 1;
	rnicp->attr.zbva_support = 1;
	rnicp->attr.local_invalidate_fence = 1;
	rnicp->attr.cq_overflow_detection = 1;
	return;
}

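/*
 * cxgb3 client "add" callback: allocate an ib_device, open the CXIO rdev
 * and register the RNIC with the RDMA core.
 */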
static void open_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *rnicp;

	pr_debug("%s t3cdev %p\n", __func__,  tdev);
	pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
	if (!rnicp) {
		pr_err("Cannot allocate ib device\n");
		return;
	}
	rnicp->rdev.ulp = rnicp;
	rnicp->rdev.t3cdev_p = tdev;

	mutex_lock(&dev_mutex);

	if (cxio_rdev_open(&rnicp->rdev)) {
		mutex_unlock(&dev_mutex);
		pr_err("Unable to open CXIO rdev\n");
		ib_dealloc_device(&rnicp->ibdev);
		return;
	}

	rnic_init(rnicp);

	list_add_tail(&rnicp->entry, &dev_list);
	mutex_unlock(&dev_mutex);

	if (iwch_register_device(rnicp)) {
		pr_err("Unable to register device\n");
		close_rnic_dev(tdev);
		return;
	}
	pr_info("Initialized device %s\n",
		pci_name(rnicp->rdev.rnic_info.pdev));
	return;
}

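/*
 * cxgb3 client "remove" callback: find the RNIC bound to @tdev, unregister
 * it from the RDMA core and release all of its resources.
 */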
static void close_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *dev, *tmp;
	pr_debug("%s t3cdev %p\n", __func__,  tdev);
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
		if (dev->rdev.t3cdev_p == tdev) {
			dev->rdev.flags = CXIO_ERROR_FATAL;
			synchronize_net();
			cancel_delayed_work_sync(&dev->db_drop_task);
			list_del(&dev->entry);
			iwch_unregister_device(dev);
			cxio_rdev_close(&dev->rdev);
			idr_destroy(&dev->cqidr);
			idr_destroy(&dev->qpidr);
			idr_destroy(&dev->mmidr);
			ib_dealloc_device(&dev->ibdev);
			break;
		}
	}
	mutex_unlock(&dev_mutex);
}

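/*
 * cxgb3 event handler: translate offload status, port and doorbell events
 * into IB events and doorbell enable/disable actions.
 */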
static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
{
	struct cxio_rdev *rdev = tdev->ulp;
	struct iwch_dev *rnicp;
	struct ib_event event;
	u32 portnum = port_id + 1;
	int dispatch = 0;

	if (!rdev)
		return;
	rnicp = rdev_to_iwch_dev(rdev);
	switch (evt) {
	case OFFLOAD_STATUS_DOWN: {
		rdev->flags = CXIO_ERROR_FATAL;
		synchronize_net();
		event.event  = IB_EVENT_DEVICE_FATAL;
		dispatch = 1;
		break;
		}
	case OFFLOAD_PORT_DOWN: {
		event.event  = IB_EVENT_PORT_ERR;
		dispatch = 1;
		break;
		}
	case OFFLOAD_PORT_UP: {
		event.event  = IB_EVENT_PORT_ACTIVE;
		dispatch = 1;
		break;
		}
	case OFFLOAD_DB_FULL: {
		disable_dbs(rnicp);
		break;
		}
	case OFFLOAD_DB_EMPTY: {
		enable_dbs(rnicp, 1);
		break;
		}
	case OFFLOAD_DB_DROP: {
		unsigned long delay = 1000;
		unsigned short r;

		disable_dbs(rnicp);
		get_random_bytes(&r, 2);
		delay += r & 1023;

		/*
		 * delay is between 1000-2023 usecs.
		 */
		schedule_delayed_work(&rnicp->db_drop_task,
			usecs_to_jiffies(delay));
		break;
		}
	}

	if (dispatch) {
		event.device = &rnicp->ibdev;
		event.element.port_num = portnum;
		ib_dispatch_event(&event);
	}

	return;
}

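/*
 * Module init: bring up the CXIO HAL and the iWARP connection manager,
 * then register the event callback and the cxgb3 client.
 */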
static int __init iwch_init_module(void)
{
	int err;

	err = cxio_hal_init();
	if (err)
		return err;
	err = iwch_cm_init();
	if (err) {
		cxio_hal_exit();
		return err;
	}
	cxio_register_ev_cb(iwch_ev_dispatch);
	cxgb3_register_client(&t3c_client);
	return 0;
}

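/* Module exit: unregister from cxgb3, then tear down the CM and HAL layers. */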
static void __exit iwch_exit_module(void)
{
	cxgb3_unregister_client(&t3c_client);
	cxio_unregister_ev_cb(iwch_ev_dispatch);
	iwch_cm_term();
	cxio_hal_exit();
}

module_init(iwch_init_module);
module_exit(iwch_exit_module);