/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

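/*
 * Reference-counted address handle to the subnet manager. A cached
 * copy is kept per port (under ib_sa_port.ah_lock) so queries can
 * reuse it instead of creating a new AH for every MAD.
 */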
struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

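/*
 * Flag bits for ib_sa_query.flags: IB_SA_ENABLE_LOCAL_SERVICE routes a
 * path query through the netlink local service (userspace resolver)
 * before falling back to a MAD, IB_SA_CANCEL marks a netlink request
 * as cancelled so the timeout worker completes it with an error, and
 * IB_SA_QUERY_OPA selects OPA (rather than IB) MAD formats.
 */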
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

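/*
 * State for the netlink local service: pending queries sit on
 * ib_nl_request_list (protected by ib_nl_request_lock) in timeout
 * order, ib_nl_sa_request_seq generates the nlmsg sequence numbers
 * used to match responses, and ib_nl_timed_work runs on ib_nl_wq to
 * expire requests that userspace never answered.
 */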
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name	= "sa",
	.add	= ib_sa_add_one,
	.remove	= ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

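/*
 * The tables below describe the wire layout of SA attributes for
 * ib_pack()/ib_unpack(): each entry maps a struct field to its offset
 * (in 32-bit words plus bits) and width within the MAD data.
 */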
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof((struct sa_path_rec *)0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = \
		sizeof((struct sa_path_rec *)0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits  = 1,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits  = 3,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 4,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits  = 6,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits  = 19,
	  .size_bits    = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits  = 8,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits  = 10,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes = \
		sizeof((struct opa_class_port_info *)0)->field,	\
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

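/*
 * Build the body of an RDMA_NL_LS_OP_RESOLVE request: a family header
 * identifying the device/port and path use, followed by one netlink
 * attribute per component-mask bit that is set in the original query.
 */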
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

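/*
 * Queue a query on the netlink request list (arming the timeout worker
 * if it is the first entry) and multicast the resolve request to
 * userspace. On send failure the query is unlinked again and -EIO is
 * returned so the caller can fall back to sending the MAD to the SA.
 */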
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

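/*
 * Timeout worker for the netlink request list: re-arms itself for
 * entries that have not expired yet, and for expired (or cancelled)
 * entries falls back to posting the MAD to the SA, flushing the send
 * buffer with an error if that is not possible.
 */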
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

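/*
 * Netlink handler allowing the userspace resolver to adjust the local
 * service timeout. The new value is clamped to the MIN/MAX bounds and
 * the deadlines of all pending requests are shifted by the delta.
 */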
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return 0;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

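/*
 * Netlink handler for resolve responses: match the response to a
 * pending request by sequence number, then either complete the query
 * from the returned path records or, on an error response, fall back
 * to posting the MAD to the SA.
 */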
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return 0;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

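/*
 * SA clients are reference counted: registration sets the count to
 * one, each outstanding query holds a reference, and unregistration
 * drops the initial reference and then blocks until the last query
 * completes.
 */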
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

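/*
 * For RoCE paths, verify that an IP route actually exists between the
 * addresses encoded in the SGID and DGID, and that the resolved
 * network type is consistent with the path record type.
 */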
static int roce_resolve_route_from_path(struct sa_path_rec *rec,
					const struct ib_gid_attr *attr)
{
	struct rdma_dev_addr dev_addr = {};
	union {
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int ret;

	if (rec->roce.route_resolved)
		return 0;
	if (!attr || !attr->ndev)
		return -EINVAL;

	dev_addr.bound_dev_if = attr->ndev->ifindex;
	/* TODO: Use net from the ib_gid_attr once it is added to it;
	 * until then, limit it to init_net.
	 */
	dev_addr.net = &init_net;

	rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
	rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);

	/* validate the route */
	ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
				    (struct sockaddr *)&dgid_addr, &dev_addr);
	if (ret)
		return ret;

	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
	     dev_addr.network == RDMA_NETWORK_IPV6) &&
	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
		return -EINVAL;

	rec->roce.route_resolved = true;
	return 0;
}

static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
				   struct sa_path_rec *rec,
				   struct rdma_ah_attr *ah_attr,
				   const struct ib_gid_attr *gid_attr)
{
	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

	if (!gid_attr) {
		gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
						 port_num, NULL);
		if (IS_ERR(gid_attr))
			return PTR_ERR(gid_attr);
	} else
		rdma_hold_gid_attr(gid_attr);

	rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				gid_attr);
	return 0;
}

/**
 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
 *   an SA path record.
 * @device: Device associated with the ah attributes initialization.
 * @port_num: Port on the specified device.
 * @rec: path record entry to use for ah attributes initialization.
 * @ah_attr: address handle attributes to initialize from the path record.
 * @sgid_attr: SGID attribute to consider during initialization.
 *
 * When ib_init_ah_attr_from_path() returns success,
 * (a) for the IB link layer it optionally contains a reference to the SGID
 *     attribute when a GRH is present.
 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
 * The user must invoke rdma_destroy_ah_attr() to release the reference to
 * SGID attributes which are initialized using ib_init_ah_attr_from_path().
 */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
			      struct sa_path_rec *rec,
			      struct rdma_ah_attr *ah_attr,
			      const struct ib_gid_attr *gid_attr)
{
	int ret = 0;

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);

	if (sa_path_is_roce(rec)) {
		ret = roce_resolve_route_from_path(rec, gid_attr);
		if (ret)
			return ret;

		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
	} else {
		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
		if (sa_path_is_opa(rec) &&
		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
			rdma_ah_set_make_grd(ah_attr, true);

		rdma_ah_set_path_bits(ah_attr,
				      be32_to_cpu(sa_path_get_slid(rec)) &
				      get_src_path_mask(device, port_num));
	}

	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
		ret = init_ah_attr_grh_fields(device, port_num,
					      rec, ah_attr, gid_attr);
	return ret;
}
EXPORT_SYMBOL(ib_init_ah_attr_from_path);

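/*
 * Take a reference on the port's cached SM address handle and allocate
 * the send buffer for a query MAD. Fails with -EAGAIN when no SM AH is
 * cached yet or the SM does not have a valid unicast LID.
 */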
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	/*
	 * Always check if sm_ah has a valid dlid assigned
	 * before querying for class port info.
	 */
	if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
	    !rdma_is_valid_unicast_lid(&ah_attr)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -EAGAIN;
	}
	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

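/*
 * Assign the query an IDR id (used by ib_sa_cancel_query), try the
 * netlink local service first when enabled, and otherwise post the
 * MAD. Returns the id on success; the query must not be touched
 * afterwards since the send may already have completed.
 */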
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_device *device,
					 u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/**
 * Check whether the current PR query can be an OPA query.
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_device *device,
				 u8 port_num,
				 struct sa_path_rec *rec)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, device, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct sa_path_rec rec;

		if (sa_query->flags & IB_SA_QUERY_OPA) {
			ib_unpack(opa_path_rec_table,
				  ARRAY_SIZE(opa_path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_OPA;
			query->callback(status, &rec, query->context);
		} else {
			ib_unpack(path_rec_table,
				  ARRAY_SIZE(path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_IB;
			sa_path_set_dmac_zero(&rec);

			if (query->conv_pr) {
				struct sa_path_rec opa;

				memset(&opa, 0, sizeof(struct sa_path_rec));
				sa_convert_path_ib_to_opa(&opa, &rec);
				query->callback(status, &opa, query->context);
			} else {
				query->callback(status, &rec, query->context);
			}
		}
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
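/*
 * Minimal usage sketch (illustrative only; my_client, device, port_num
 * and rec below belong to the caller after filling in rec.rec_type,
 * rec.sgid and rec.dgid, and are not defined in this file):
 *
 *	static void path_done(int status, struct sa_path_rec *resp,
 *			      void *context)
 *	{
 *		if (!status)
 *			pr_info("path resolved, sl %d\n", resp->sl);
 *	}
 *
 *	struct ib_sa_query *query;
 *	int id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				    1000, GFP_KERNEL, path_done, NULL, &query);
 *	if (id < 0)
 *		return id;
 *	// to cancel later: ib_sa_cancel_query(id, query);
 */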
ib_sa_path_rec_get(struct ib_sa_client * client,struct ib_device * device,u8 port_num,struct sa_path_rec * rec,ib_sa_comp_mask comp_mask,int timeout_ms,gfp_t gfp_mask,void (* callback)(int status,struct sa_path_rec * resp,void * context),void * context,struct ib_sa_query ** sa_query)1597 int ib_sa_path_rec_get(struct ib_sa_client *client,
1598 struct ib_device *device, u8 port_num,
1599 struct sa_path_rec *rec,
1600 ib_sa_comp_mask comp_mask,
1601 int timeout_ms, gfp_t gfp_mask,
1602 void (*callback)(int status,
1603 struct sa_path_rec *resp,
1604 void *context),
1605 void *context,
1606 struct ib_sa_query **sa_query)
1607 {
1608 struct ib_sa_path_query *query;
1609 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1610 struct ib_sa_port *port;
1611 struct ib_mad_agent *agent;
1612 struct ib_sa_mad *mad;
1613 enum opa_pr_supported status;
1614 int ret;
1615
1616 if (!sa_dev)
1617 return -ENODEV;
1618
1619 if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
1620 (rec->rec_type != SA_PATH_REC_TYPE_OPA))
1621 return -EINVAL;
1622
1623 port = &sa_dev->port[port_num - sa_dev->start_port];
1624 agent = port->agent;
1625
1626 query = kzalloc(sizeof(*query), gfp_mask);
1627 if (!query)
1628 return -ENOMEM;
1629
1630 query->sa_query.port = port;
1631 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
1632 status = opa_pr_query_possible(client, device, port_num, rec);
1633 if (status == PR_NOT_SUPPORTED) {
1634 ret = -EINVAL;
1635 goto err1;
1636 } else if (status == PR_OPA_SUPPORTED) {
1637 query->sa_query.flags |= IB_SA_QUERY_OPA;
1638 } else {
1639 query->conv_pr =
1640 kmalloc(sizeof(*query->conv_pr), gfp_mask);
1641 if (!query->conv_pr) {
1642 ret = -ENOMEM;
1643 goto err1;
1644 }
1645 }
1646 }
1647
1648 ret = alloc_mad(&query->sa_query, gfp_mask);
1649 if (ret)
1650 goto err2;
1651
1652 ib_sa_client_get(client);
1653 query->sa_query.client = client;
1654 query->callback = callback;
1655 query->context = context;
1656
1657 mad = query->sa_query.mad_buf->mad;
1658 init_mad(&query->sa_query, agent);
1659
1660 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1661 query->sa_query.release = ib_sa_path_rec_release;
1662 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1663 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1664 mad->sa_hdr.comp_mask = comp_mask;
1665
1666 if (query->sa_query.flags & IB_SA_QUERY_OPA) {
1667 ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
1668 rec, mad->data);
1669 } else if (query->conv_pr) {
1670 sa_convert_path_opa_to_ib(query->conv_pr, rec);
1671 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1672 query->conv_pr, mad->data);
1673 } else {
1674 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1675 rec, mad->data);
1676 }
1677
1678 *sa_query = &query->sa_query;
1679
1680 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1681 query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
1682 query->conv_pr : rec;
1683
1684 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1685 if (ret < 0)
1686 goto err3;
1687
1688 return ret;
1689
1690 err3:
1691 *sa_query = NULL;
1692 ib_sa_client_put(query->sa_query.client);
1693 free_mad(&query->sa_query);
1694 err2:
1695 kfree(query->conv_pr);
1696 err1:
1697 kfree(query);
1698 return ret;
1699 }
1700 EXPORT_SYMBOL(ib_sa_path_rec_get);
1701
ib_sa_service_rec_callback(struct ib_sa_query * sa_query,int status,struct ib_sa_mad * mad)1702 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1703 int status,
1704 struct ib_sa_mad *mad)
1705 {
1706 struct ib_sa_service_query *query =
1707 container_of(sa_query, struct ib_sa_service_query, sa_query);
1708
1709 if (mad) {
1710 struct ib_sa_service_rec rec;
1711
1712 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1713 mad->data, &rec);
1714 query->callback(status, &rec, query->context);
1715 } else
1716 query->callback(status, NULL, query->context);
1717 }
1718
ib_sa_service_rec_release(struct ib_sa_query * sa_query)1719 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1720 {
1721 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1722 }
1723
1724 /**
1725 * ib_sa_service_rec_query - Start Service Record operation
1726 * @client:SA client
1727 * @device:device to send request on
1728 * @port_num: port number to send request on
1729 * @method:SA method - should be get, set, or delete
1730 * @rec:Service Record to send in request
1731 * @comp_mask:component mask to send in request
1732 * @timeout_ms:time to wait for response
1733 * @gfp_mask:GFP mask to use for internal allocations
1734 * @callback:function called when request completes, times out or is
1735 * canceled
1736 * @context:opaque user context passed to callback
1737 * @sa_query:request context, used to cancel request
1738 *
1739 * Send a Service Record set/get/delete to the SA to register,
1740 * unregister or query a service record.
1741 * The callback function will be called when the request completes (or
1742 * fails); status is 0 for a successful response, -EINTR if the query
1743 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
1744 * occurred sending the query. The resp parameter of the callback is
1745 * only valid if status is 0.
1746 *
1747 * If the return value of ib_sa_service_rec_query() is negative, it is an
1748 * error code. Otherwise it is a request ID that can be used to cancel
1749 * the query.
1750 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
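
/*
 * Example (illustrative sketch only, not part of this file): a minimal
 * caller that registers a service record and blocks until the SA
 * answers.  The names my_service_ctx, my_service_cb and
 * my_register_service are hypothetical; the sketch assumes a client
 * already set up with ib_sa_register_client() and uses the
 * IB_SA_SERVICE_REC_SERVICE_ID component mask from <rdma/ib_sa.h>.
 */
struct my_service_ctx {
	struct completion done;
	int status;
};

static void my_service_cb(int status, struct ib_sa_service_rec *resp,
			  void *context)
{
	struct my_service_ctx *ctx = context;

	/* resp is only valid when status == 0 */
	ctx->status = status;
	complete(&ctx->done);
}

static int my_register_service(struct ib_sa_client *client,
			       struct ib_device *device, u8 port_num)
{
	struct ib_sa_service_rec rec = {};
	struct ib_sa_query *query;
	struct my_service_ctx ctx;
	int id;

	init_completion(&ctx.done);
	rec.id = 0x1234;	/* hypothetical ServiceID */

	id = ib_sa_service_rec_query(client, device, port_num,
				     IB_MGMT_METHOD_SET, &rec,
				     IB_SA_SERVICE_REC_SERVICE_ID,
				     3000, GFP_KERNEL,
				     my_service_cb, &ctx, &query);
	if (id < 0)
		return id;

	/* id could instead be kept and passed to ib_sa_cancel_query() */
	wait_for_completion(&ctx.done);
	return ctx.status;
}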

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

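/*
 * Example (hypothetical helper): a caller that gives up on a pending
 * MCMemberRecord query can cancel it with the request id returned by
 * ib_sa_mcmember_rec_query(); the registered callback then runs with
 * status == -EINTR.  A minimal sketch, assuming the caller kept both
 * the id and the query pointer:
 */
static void my_abort_mcmember_query(int id, struct ib_sa_query *query)
{
	/* ib_sa_cancel_query() is declared in <rdma/ib_sa.h> */
	ib_sa_cancel_query(id, query);
}
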
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
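
/*
 * Example (illustrative sketch): fetching one GUID block for a LID and
 * waiting synchronously.  my_guid_cb and my_query_guid_block are
 * hypothetical names; the component mask bits assume
 * IB_SA_GUIDINFO_REC_LID and IB_SA_GUIDINFO_REC_BLOCK_NUM from
 * <rdma/ib_sa.h>, and DECLARE_COMPLETION_ONSTACK comes from
 * <linux/completion.h>.
 */
static void my_guid_cb(int status, struct ib_sa_guidinfo_rec *resp,
		       void *context)
{
	/* resp is only valid when status == 0; a real caller would
	 * record status and copy resp before completing.
	 */
	complete(context);
}

static int my_query_guid_block(struct ib_sa_client *client,
			       struct ib_device *device, u8 port_num,
			       u16 lid, u8 block_num)
{
	struct ib_sa_guidinfo_rec rec = {};
	struct ib_sa_query *query;
	DECLARE_COMPLETION_ONSTACK(done);
	int id;

	rec.lid = cpu_to_be16(lid);
	rec.block_num = block_num;

	id = ib_sa_guid_info_rec_query(client, device, port_num, &rec,
				       IB_SA_GUIDINFO_REC_LID |
				       IB_SA_GUIDINFO_REC_BLOCK_NUM,
				       IB_MGMT_METHOD_GET, 3000, GFP_KERNEL,
				       my_guid_cb, &done, &query);
	if (id < 0)
		return id;

	wait_for_completion(&done);
	return 0;
}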

bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
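
/*
 * Example (hypothetical policy helper, modeled loosely on how a
 * send-only multicast sender might consume the cached capability).
 * The JoinState bit values 0x8 (send-only full member) and 0x4
 * (send-only non-member) are taken from the IBTA MCMemberRecord
 * definition and are illustrative here, not names from this file:
 */
static u8 my_pick_sendonly_join_state(struct ib_sa_client *client,
				      struct ib_device *device, u8 port_num)
{
	if (ib_sa_sendonly_fullmem_support(client, device, port_num))
		return 0x8;	/* send-only full member */
	return 0x4;		/* send-only non-member */
}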

struct ib_classport_info_context {
	struct completion done;
	struct ib_sa_query *sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  int timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;
	bool grh_required;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);

	grh_required = rdma_is_grh_required(port->agent->device,
					    port->port_num);

	/*
	 * The OPA sm_lid of 0xFFFF needs special handling so that it can be
	 * differentiated from a permissive LID of 0xFFFF. We set the
	 * grh_required flag here so the SA can program the DGID in the
	 * address handle appropriately
	 */
	if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
	    (grh_required ||
	     port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
		rdma_ah_set_make_grd(&ah_attr, true);

	if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	ib_register_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}