/*
 * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */
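/*
 * Illustrative example only: callers convert at this boundary with the
 * usual byteswap helpers, e.g.
 *
 *	cm_req_set_starting_psn(req_msg, cpu_to_be32(psn));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 */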

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __attribute__ ((packed));

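/*
 * The offsetNN fields above pack several protocol bit-fields at byte
 * offset NN of the CM REQ payload (following the MAD header).  They are
 * intended to be read and written through the accessors below, e.g.
 * (illustrative) cm_req_set_local_qpn(req_msg, qpn) rather than by
 * touching req_msg->offset32 directly.
 */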
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(req_msg->offset32) &
					  0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFF07));
}

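/*
 * Transport service type occupies two bits of offset40: 0 selects RC,
 * 1 selects UC, and 3 indicates an extended transport, in which case the
 * extended transport type in the low bits of offset51 identifies XRC.
 * This is the encoding implemented by the two helpers below.
 */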
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						   0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						 req_msg->offset40) &
						  0xFFFFFFF9);
	}
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				     (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				  (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

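/*
 * For XRC the REP carries the local EECN rather than a local QPN; the
 * helper below returns whichever of the two applies for the given QP
 * type, e.g. (illustrative) qpn = cm_rep_get_qpn(rep_msg, IB_QPT_RC).
 */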
static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					    u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					 (be32_to_cpu(lap_msg->offset56) &
					  0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			     (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
					 0x000000FF));
}

#endif /* CM_MSGS_H */