/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

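/* Look up the pkey_index_qp_list tracking structure for the port and
 * pkey index described by *pp.  Returns NULL if no QP has been listed
 * for that pkey index yet.
 */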
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
	list_for_each_entry(tmp_pkey,
			    &dev->port_pkey_list[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
	return pkey;
}

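/* Resolve the cached P_Key value and subnet prefix for the port and
 * pkey index described by *pp.
 */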
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}

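/* Ask the LSM whether this QP's security context, and the contexts of
 * all QPs sharing it, may use the given pkey on the given subnet.
 * Returns the first denial encountered.
 */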
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP and the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}

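/* Re-validate every QP listed on this pkey index after a cache change.
 * QPs that are no longer permitted to use the pkey are collected on a
 * local list and then moved to the error state under their security
 * mutex.
 */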
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, e.g. after
	 * a destroy has failed.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

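/* Free the LSM security blob, the port/pkey settings and the security
 * structure itself.
 */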
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & IB_QP_PORT)
		new_pps->main.port_num = qp_attr->port_num;
	else if (qp_pps)
		new_pps->main.port_num = qp_pps->main.port_num;

	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		new_pps->main.pkey_index = qp_attr->pkey_index;
	else if (qp_pps)
		new_pps->main.pkey_index = qp_pps->main.pkey_index;

	if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
	     (qp_attr_mask & IB_QP_PORT)) ||
	    (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
		new_pps->main.state = IB_PORT_PKEY_VALID;

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

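/* Set up security for a QP handle opened on an existing (real) QP:
 * create its security structure, verify it may use the real QP's
 * current port/pkey settings and, if so, link it onto the real QP's
 * shared_qp_list.
 */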
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

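/* Unlink a shared QP handle from the real QP's shared_qp_list and free
 * its security structure.
 */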
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

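/* Allocate and initialize the security structure for a new QP.  Only
 * devices with at least one IB port get a security context; otherwise
 * qp->qp_sec is left NULL.
 */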
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	u8 i = rdma_start_port(dev);
	bool is_ib = false;
	int ret;

	while (i <= rdma_end_port(dev) && !is_ib)
		is_ib = rdma_protocol_ib(dev, i++);

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

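/* First stage of QP destruction: unlist the port/pkey settings and
 * mark the structure as destroying so a concurrent cache-change walk
 * won't touch the soon-to-be-invalid qp pointer.
 */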
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

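/* The destroy failed: wait for any pending error-flow users, relist
 * the port/pkey settings and re-check access, sending the QP to the
 * error state if it is no longer allowed.
 */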
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

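/* The destroy succeeded: wait for any in-flight error-flow users of
 * this structure, then free it.
 */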
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}

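/* Called when the cached pkey table or subnet prefix for a port may
 * have changed; re-validates every QP listed on each pkey index of
 * that port.
 */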
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

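/* Free the per-port pkey_index_qp_list entries for every port of the
 * device.
 */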
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		spin_lock(&device->port_pkey_list[i].list_lock);
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_pkey_list[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
		spin_unlock(&device->port_pkey_list[i].list_lock);
	}
}

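/* Security-aware wrapper around the driver's modify_qp: build the
 * prospective port/pkey settings, list and check them before the
 * modify, then either commit them or roll back depending on the
 * result.
 */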
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->modify_qp(real_qp,
						 qp_attr,
						 qp_attr_mask,
						 udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}

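/* Resolve a port/pkey index to the actual P_Key and subnet prefix and
 * ask the LSM whether the given security context may use them.
 */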
static int ib_security_pkey_access(struct ib_device *dev,
				   u8 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

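/* LSM policy-change notifier: re-evaluate whether the SMI agent is
 * still allowed to manage the subnet on its port.
 */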
static int ib_mad_agent_security_change(struct notifier_block *nb,
					unsigned long event,
					void *data)
{
	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
							     ag->device->name,
							     ag->port_num);

	return NOTIFY_OK;
}

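/* Allocate the MAD agent's LSM security context.  For SMI agents also
 * check subnet-management permission and register for LSM policy
 * change notifications.
 */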
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	ret = security_ib_endport_manage_subnet(agent->security,
						agent->device->name,
						agent->port_num);
	if (ret)
		goto free_security;

	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
	ret = register_lsm_notifier(&agent->lsm_nb);
	if (ret)
		goto free_security;

	agent->smp_allowed = true;
	agent->lsm_nb_reg = true;
	return 0;

free_security:
	security_ib_free_security(agent->security);
	return ret;
}

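/* Release the MAD agent's LSM resources: unregister the policy-change
 * notifier if it was registered and free the security context.
 */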
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	if (agent->lsm_nb_reg)
		unregister_lsm_notifier(&agent->lsm_nb);

	security_ib_free_security(agent->security);
}

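/* Per-MAD security check: SMI agents must currently be allowed to
 * manage the subnet; all other agents must pass the pkey access check
 * for the pkey index the MAD will be sent with.
 */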
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!map->agent.smp_allowed)
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}