/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}

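/* Register @edev with the qedr driver via its add() callback. Caller
 * must hold qedr_dev_list_lock. A NULL qedr_drv means the qedr module
 * is not loaded yet; registration then happens later, from
 * qede_rdma_register_driver().
 */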
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}

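/* Set up the deferred-event machinery: the list of event work nodes, a
 * refcount tracking in-flight qede_rdma_add_event() callers, the
 * completion signalled once that refcount drops to zero, and a
 * single-threaded workqueue that serializes event handling.
 */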
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}

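/* Flush any queued work, then cancel and free every event node. Only
 * called from qede_rdma_destroy_wq() after the refcount has dropped to
 * zero, so no new events can be added concurrently.
 */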
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);
	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}

static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}

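/* Tear down the event workqueue. Drops the initial reference taken by
 * kref_init() in qede_rdma_create_wq() and waits until every in-flight
 * qede_rdma_add_event() call has released its reference before the
 * event list and the workqueue are freed.
 */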
static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
}

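/* Called during qede probe. For an RDMA-capable device, create the
 * event workqueue, add the device to the global list and, if the qedr
 * driver is already loaded, register the device with it right away.
 */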
int qede_rdma_dev_add(struct qede_dev *edev)
{
	int rc = 0;

	if (qede_rdma_supported(edev)) {
		rc = qede_rdma_create_wq(edev);
		if (rc)
			return rc;

		INIT_LIST_HEAD(&edev->rdma_info.entry);
		mutex_lock(&qedr_dev_list_lock);
		list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
		_qede_rdma_dev_add(edev);
		mutex_unlock(&qedr_dev_list_lock);
	}

	return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
	edev->rdma_info.qedr_dev = NULL;
}

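/* Called during qede removal. The workqueue is destroyed first so no
 * deferred event can run while the device is being unregistered from
 * qedr and taken off the global list.
 */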
void qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	qede_rdma_destroy_wq(edev);
	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_remove(edev);
	list_del(&edev->rdma_info.entry);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}

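/* Called by the qedr module on load. Record the driver, attach it to
 * every qede device discovered so far, and report link up for the
 * interfaces that are already running.
 */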
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);

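/* Called by the qedr module on unload: detach every registered device
 * before clearing the driver pointer.
 */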
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		if (edev->rdma_info.qedr_dev)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

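/* Reuse an event node whose work has already run, or allocate a fresh
 * one with GFP_ATOMIC so the call is safe from non-sleeping contexts.
 * New nodes stay on rdma_event_list until qede_rdma_cleanup_event()
 * frees them.
 */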
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}

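/* Work handler: runs in process context on the device's rdma_wq and
 * dispatches the deferred event to the matching notification helper.
 */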
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}

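/* Queue an event for deferred handling. Taking a reference on refcnt
 * ensures qede_rdma_destroy_wq() cannot tear down the workqueue while
 * the node is being set up; if teardown has already begun (the
 * refcount reached zero), the event is silently dropped.
 */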
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	if (!edev->rdma_info.qedr_dev)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}

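/* Entry points invoked from the qede core on netdev state changes;
 * each defers its notification to the RDMA workqueue.
 */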
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}