/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

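/*
 * Every registered mlx5 sub-interface (struct mlx5_interface) and every
 * registered core device (struct mlx5_core_dev, one per PCI function) is
 * tracked on one of the two global lists below.
 */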
static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* protects intf_list and mlx5_dev_list */
static DEFINE_MUTEX(mlx5_intf_mutex);

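/*
 * One instance per (core device, interface) pair: stores the context
 * returned by the interface's ->add() callback and is linked on the
 * device's priv->ctx_list.
 */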
struct mlx5_device_context {
	struct list_head list;
	struct mlx5_interface *intf;
	void *context;
	unsigned long state;
};

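/*
 * An event queued on priv->waiting_events_list while an interface is being
 * added or attached; replayed by delayed_event_release().
 */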
struct mlx5_delayed_event {
	struct list_head list;
	struct mlx5_core_dev *dev;
	enum mlx5_dev_event event;
	unsigned long param;
};

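/* Bits kept in mlx5_device_context::state */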
enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};

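/*
 * Queue an event for later delivery to an interface that is still being
 * added or attached. Called with priv->ctx_lock held, hence GFP_ATOMIC;
 * the event is dropped (with an error log) if the allocation fails.
 */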
static void add_delayed_event(struct mlx5_priv *priv,
			      struct mlx5_core_dev *dev,
			      enum mlx5_dev_event event,
			      unsigned long param)
{
	struct mlx5_delayed_event *delayed_event;

	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
	if (!delayed_event) {
		mlx5_core_err(dev, "event %d is missed\n", event);
		return;
	}

	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
	delayed_event->dev = dev;
	delayed_event->event = event;
	delayed_event->param = param;
	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

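/*
 * Stop accumulating events and deliver everything that was queued while the
 * interface was being added or attached. If the add failed (no context), the
 * queued events are simply freed.
 */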
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
				  struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;
	struct list_head temp;

	INIT_LIST_HEAD(&temp);

	spin_lock_irq(&priv->ctx_lock);

	priv->is_accum_events = false;
	list_splice_init(&priv->waiting_events_list, &temp);
	if (!dev_ctx->context)
		goto out;
	list_for_each_entry_safe(de, n, &temp, list)
		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
	spin_unlock_irq(&priv->ctx_lock);

	list_for_each_entry_safe(de, n, &temp, list) {
		list_del(&de->list);
		kfree(de);
	}
}

/* Accumulate events that can arrive after mlx5_ib calls ib_register_device()
 * but before that interface is added to the events list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = true;
	spin_unlock_irq(&priv->ctx_lock);
}

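/*
 * Instantiate @intf on the core device that owns @priv: call the interface's
 * ->add() callback, publish the returned context on priv->ctx_list and
 * replay any events that arrived while the add was in progress. The add is
 * skipped when mlx5_lag_intf_add() rejects this interface for the device.
 */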
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	if (!mlx5_lag_intf_add(intf, priv))
		return;

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;

	delayed_event_start(priv);

	dev_ctx->context = intf->add(dev);
	if (dev_ctx->context) {
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
		if (intf->attach)
			set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	}

	delayed_event_release(dev_ctx, priv);

	if (!dev_ctx->context)
		kfree(dev_ctx);
}

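/*
 * Look up the device context that mlx5_add_device() created for @intf on
 * this device, or NULL if the interface was never added.
 */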
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;
	return NULL;
}

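/*
 * Tear down @intf on this device: clear a registered page-fault handler (and
 * wait for in-flight faults via SRCU), unlink the context and call the
 * interface's ->remove() callback if the interface was actually added.
 */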
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);

	synchronize_srcu(&priv->pfault_srcu);
#endif

	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);

	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);

	kfree(dev_ctx);
}

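/*
 * Re-activate an interface on a device that is coming back up: prefer the
 * lightweight ->attach() callback when the interface provides one, otherwise
 * fall back to a full ->add(). Events arriving in the meantime are queued
 * and replayed by delayed_event_release().
 */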
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	delayed_event_start(priv);
	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			goto out;
		if (intf->attach(dev, dev_ctx->context))
			goto out;

		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			goto out;
		dev_ctx->context = intf->add(dev);
		if (!dev_ctx->context)
			goto out;

		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}

out:
	delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

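/*
 * Counterpart of mlx5_attach_interface(): prefer the lightweight ->detach()
 * callback and fall back to ->remove() for interfaces without one. The
 * device context stays on priv->ctx_list so the interface can be attached
 * again later.
 */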
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);

	return found;
}

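/*
 * Register a freshly initialized core device: put it on the global device
 * list and instantiate every currently registered interface on it.
 * mlx5_unregister_device() below undoes this in reverse order.
 */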
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry_reverse(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}

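/*
 * Register a sub-interface and instantiate it on every core device that is
 * already up. As a rough, illustrative sketch (the example_* names below are
 * hypothetical, not part of the driver), a sub-driver would typically do:
 *
 *	static void *example_add(struct mlx5_core_dev *dev)
 *	{
 *		return kzalloc(sizeof(struct example_ctx), GFP_KERNEL);
 *	}
 *
 *	static void example_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		kfree(context);
 *	}
 *
 *	static struct mlx5_interface example_intf = {
 *		.add    = example_add,
 *		.remove = example_remove,
 *	};
 *
 * and then call mlx5_register_interface(&example_intf) from its module init
 * and mlx5_unregister_interface(&example_intf) from its module exit.
 */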
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

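/*
 * Return the protocol-specific device (as exposed by the interface's
 * ->get_dev() callback) that is registered for @protocol on this core
 * device, or NULL if no such interface is present.
 */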
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}

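/*
 * Build an identifier from the PCI domain, bus and slot (the function number
 * is masked out), so that all functions of the same physical device map to
 * the same id.
 */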
static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

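/*
 * Return another registered core device with the same domain/bus/slot, i.e.
 * a sibling PCI function of the same physical device, or NULL.
 */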
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u32 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}

	return res;
}

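/*
 * Dispatch a device event to every interface that is currently added or
 * attached. If an add/attach is in flight, the event is also queued so it
 * can be replayed once the interface is ready.
 */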
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	if (priv->is_accum_events)
		add_delayed_event(priv, dev, event, param);

	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
	 * ADDED or ATTACHED bit is set.
	 */
	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event &&
		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

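/*
 * Forward a page fault to the handler registered via the interface's
 * ->pfault callback, if there is one. The SRCU read lock keeps the handler
 * valid while it runs; mlx5_remove_device() synchronizes before the handler
 * can go away.
 */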
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}