• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
3  *
4  *  SQP functions
5  *
6  *  Authors: Khadija Souissi <souissi@de.ibm.com>
7  *           Heiko J Schick <schickhj@de.ibm.com>
8  *
9  *  Copyright (c) 2005 IBM Corporation
10  *
11  *  All rights reserved.
12  *
13  *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
14  *  BSD.
15  *
16  * OpenIB BSD License
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are met:
20  *
21  * Redistributions of source code must retain the above copyright notice, this
22  * list of conditions and the following disclaimer.
23  *
24  * Redistributions in binary form must reproduce the above copyright notice,
25  * this list of conditions and the following disclaimer in the documentation
26  * and/or other materials
27  * provided with the distribution.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39  * POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include <rdma/ib_mad.h>
43 
44 #include "ehca_classes.h"
45 #include "ehca_tools.h"
46 #include "ehca_iverbs.h"
47 #include "hcp_if.h"
48 
49 #define IB_MAD_STATUS_REDIRECT		cpu_to_be16(0x0002)
50 #define IB_MAD_STATUS_UNSUP_VERSION	cpu_to_be16(0x0004)
51 #define IB_MAD_STATUS_UNSUP_METHOD	cpu_to_be16(0x0008)
52 
53 #define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
54 
/**
 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
 * queue pair is created successfully, the corresponding port becomes active.
 *
 * Defining special queue pair 0 (SMI QP) is not supported yet.
 *
 * @shca: HCA the special queue pair is created on
 * @ehca_qp: queue pair to be defined as the special queue pair
 * @qp_init_attr: queue pair init attributes with port and queue pair type
 */
63 
ehca_define_sqp(struct ehca_shca * shca,struct ehca_qp * ehca_qp,struct ib_qp_init_attr * qp_init_attr)64 u64 ehca_define_sqp(struct ehca_shca *shca,
65 		    struct ehca_qp *ehca_qp,
66 		    struct ib_qp_init_attr *qp_init_attr)
67 {
68 	u32 pma_qp_nr, bma_qp_nr;
69 	u64 ret;
70 	u8 port = qp_init_attr->port_num;
71 	int counter;
72 
73 	shca->sport[port - 1].port_state = IB_PORT_DOWN;
74 
75 	switch (qp_init_attr->qp_type) {
76 	case IB_QPT_SMI:
77 		/* function not supported yet */
78 		break;
79 	case IB_QPT_GSI:
80 		ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
81 					 ehca_qp->ipz_qp_handle,
82 					 ehca_qp->galpas.kernel,
83 					 (u32) qp_init_attr->port_num,
84 					 &pma_qp_nr, &bma_qp_nr);
85 
86 		if (ret != H_SUCCESS) {
87 			ehca_err(&shca->ib_device,
88 				 "Can't define AQP1 for port %x. h_ret=%lli",
89 				 port, ret);
90 			return ret;
91 		}
92 		shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
93 		ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
94 			 port, pma_qp_nr);
95 		break;
96 	default:
97 		ehca_err(&shca->ib_device, "invalid qp_type=%x",
98 			 qp_init_attr->qp_type);
99 		return H_PARAMETER;
100 	}
101 
102 	if (ehca_nr_ports < 0) /* autodetect mode */
103 		return H_SUCCESS;
104 
105 	for (counter = 0;
106 	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
107 		     counter < ehca_port_act_time;
108 	     counter++) {
109 		ehca_dbg(&shca->ib_device, "... wait until port %x is active",
110 			 port);
111 		msleep_interruptible(1000);
112 	}
113 
114 	if (counter == ehca_port_act_time) {
115 		ehca_err(&shca->ib_device, "Port %x is not active.", port);
116 		return H_HARDWARE;
117 	}
118 
119 	return H_SUCCESS;
120 }
121 
/*
 * On-the-wire layout of a performance management (PMA) MAD:
 * common MAD header, 40 reserved bytes, then the attribute data.
 * Packed so the struct can be overlaid directly on the MAD buffer.
 */
struct ib_perf {
	struct ib_mad_hdr mad_hdr;	/* common MAD header */
	u8 reserved[40];		/* reserved area of the PMA MAD */
	u8 data[192];			/* attribute payload (ClassPortInfo) */
} __attribute__ ((packed));
127 
/*
 * TC/SL/FL packed into 32 bits, as in ClassPortInfo.
 * Used to write the redirect_tcslfl field of the ClassPortInfo attribute.
 */
struct tcslfl {
	u32 tc:8;	/* traffic class */
	u32 sl:4;	/* service level */
	u32 fl:20;	/* flow label */
} __attribute__ ((packed));
134 
/*
 * IP Version/TC/FL packed into 32 bits, as in the GRH.
 * Used to read version_tclass_flow from an incoming GRH.
 */
struct vertcfl {
	u32 ver:4;	/* IP version */
	u32 tc:8;	/* traffic class */
	u32 fl:20;	/* flow label */
} __attribute__ ((packed));
141 
/*
 * Answer an incoming PMA MAD by redirecting the sender to the firmware
 * PMA QP of this port (via a ClassPortInfo reply with REDIRECT status).
 */
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
			     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			     const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	const struct ib_perf *req = (const struct ib_perf *)in_mad;
	struct ib_perf *rsp = (struct ib_perf *)out_mad;
	struct ib_class_port_info *cpi =
		(struct ib_class_port_info *)rsp->data;
	struct tcslfl *rsp_tcslfl =
		(struct tcslfl *)&cpi->redirect_tcslfl;
	struct ehca_shca *shca =
		container_of(ibdev, struct ehca_shca, ib_device);
	struct ehca_sport *sport = &shca->sport[port_num - 1];
	u8 method;

	ehca_dbg(ibdev, "method=%x", req->mad_hdr.method);

	/* start the reply as a copy of the request */
	*out_mad = *in_mad;
	method = req->mad_hdr.method;

	if (req->mad_hdr.class_version != 1) {
		ehca_warn(ibdev, "Unsupported class_version=%x",
			  req->mad_hdr.class_version);
		rsp->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
	} else if (method == IB_MGMT_METHOD_GET ||
		   method == IB_MGMT_METHOD_SET) {
		/* set class port info for redirection */
		rsp->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
		rsp->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
		memset(cpi, 0, sizeof(*cpi));
		cpi->base_version = 1;
		cpi->class_version = 1;
		cpi->resp_time_value = 18;

		/* copy local routing information from WC where applicable */
		rsp_tcslfl->sl     = in_wc->sl;
		cpi->redirect_lid  =
			sport->saved_attr.lid | in_wc->dlid_path_bits;
		cpi->redirect_qp   = sport->pma_qp_nr;
		cpi->redirect_qkey = IB_QP1_QKEY;

		ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
				&cpi->redirect_pkey);

		if (in_grh) {
			/* request was globally routed, copy route info */
			const struct vertcfl *grh_vertcfl =
				(const struct vertcfl *)&in_grh->version_tclass_flow;

			memcpy(cpi->redirect_gid, in_grh->dgid.raw,
			       sizeof(cpi->redirect_gid));
			rsp_tcslfl->tc = grh_vertcfl->tc;
			rsp_tcslfl->fl = grh_vertcfl->fl;
		} else {
			/* else only fill in default GID */
			ehca_query_gid(ibdev, port_num, 0,
				       (union ib_gid *)&cpi->redirect_gid);
		}

		ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
			 sport->saved_attr.lid, sport->pma_qp_nr);
	} else if (method == IB_MGMT_METHOD_GET_RESP) {
		/* responses are not consumed here; report failure, no reply */
		return IB_MAD_RESULT_FAILURE;
	} else {
		rsp->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
	}

	rsp->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
218 
/*
 * MAD entry point for the ehca device: validates sizes and port, then
 * forwards performance-management MADs to ehca_process_perf().
 */
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *in, size_t in_mad_size,
		     struct ib_mad_hdr *out, size_t *out_mad_size,
		     u16 *out_mad_pkey_index)
{
	const struct ib_mad *mad_in = (const struct ib_mad *)in;
	struct ib_mad *mad_out = (struct ib_mad *)out;

	/* only plain (non-jumbo) MADs are handled */
	if (WARN_ON_ONCE(in_mad_size != sizeof(*mad_in) ||
			 *out_mad_size != sizeof(*mad_out)))
		return IB_MAD_RESULT_FAILURE;

	if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
		return IB_MAD_RESULT_FAILURE;

	/* accept only pma request */
	if (mad_in->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return IB_MAD_RESULT_SUCCESS;

	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);

	return ehca_process_perf(ibdev, port_num, in_wc, in_grh,
				 mad_in, mad_out);
}
246