/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/ctype.h>
#include <rdma/ib_sysfs.h>

#include "qib.h"
#include "qib_mad.h"

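/*
 * Resolve the kobject of a per-port sysfs directory back to the driver's
 * qib_pportdata.  ib_port_sysfs_get_ibdev_kobj() returns the owning
 * ib_device and the 1-based port number, hence the "port_num - 1" index
 * into dd->pport[].
 */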
static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj)
{
	u32 port_num;
	struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	return &dd->pport[port_num - 1];
}

/*
 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
 */
static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num,
				 struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];

	return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
}

static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_attribute *attr,
				  const char *buf, size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
		return ret;
	}

	/*
	 * Set the "intentional" heartbeat enable per either of
	 * "Enable" and "Auto", as these are normally set together.
	 * This bit is consulted when leaving loopback mode,
	 * because entering loopback mode overrides it and automatically
	 * disables heartbeat.
	 */
	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
	return ret < 0 ? ret : count;
}
static IB_PORT_ATTR_RW(hrtbt_enable);

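/*
 * Put the link into (or take it out of) loopback.  The keyword in @buf is
 * not parsed here; it is passed straight to the chip-specific
 * f_set_ib_loopback() handler.  Write-only.
 */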
static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num,
			      struct ib_port_attribute *attr, const char *buf,
			      size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	int ret = count, r;

	r = dd->f_set_ib_loopback(ppd, buf);
	if (r < 0)
		ret = r;

	return ret;
}
static IB_PORT_ATTR_WO(loopback);

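/*
 * LED override: the numeric value is parsed with kstrtou16() and handed to
 * qib_set_led_override().  Write-only.
 */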
static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_attribute *attr,
				  const char *buf, size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;
	u16 val;

	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid LED override\n");
		return ret;
	}

	qib_set_led_override(ppd, val);
	return count;
}
static IB_PORT_ATTR_WO(led_override);

static ssize_t status_show(struct ib_device *ibdev, u32 port_num,
			   struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];

	if (!ppd->statusp)
		return -EINVAL;

	return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
}
static IB_PORT_ATTR_RO(status);

/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 */
static const char * const qib_status_str[] = {
	"Initted",
	"",
	"",
	"",
	"",
	"Present",
	"IB_link_up",
	"IB_configured",
	"",
	"Fatal_Hardware_Error",
	NULL,
};

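/*
 * Render the status bitmap as a space-separated list of the names in
 * qib_status_str[], stopping if the output would overflow PAGE_SIZE.
 */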
static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	int i, any;
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}

	s = *(ppd->statusp);
	*buf = '\0';
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);

	ret = strlen(buf);

bail:
	return ret;
}
static IB_PORT_ATTR_RO(status_str);

/* end of per-port functions */

static struct attribute *port_linkcontrol_attributes[] = {
	&ib_port_attr_loopback.attr,
	&ib_port_attr_led_override.attr,
	&ib_port_attr_hrtbt_enable.attr,
	&ib_port_attr_status.attr,
	&ib_port_attr_status_str.attr,
	NULL
};

static const struct attribute_group port_linkcontrol_group = {
	.name = "linkcontrol",
	.attrs = port_linkcontrol_attributes,
};
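
/*
 * The named port groups in this file are registered through
 * qib_attr_port_groups[] below and show up under the port sysfs
 * directory, e.g. (device and port names are examples)
 * /sys/class/infiniband/qib0/ports/1/linkcontrol/loopback.
 */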

/*
 * Start of per-port congestion control structures and support code
 */

/*
 * Congestion control table size followed by table entries
 */
static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr, char *buf,
				 loff_t pos, size_t count)
{
	struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
	int ret;

	if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
		return -EINVAL;

	ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
		 + sizeof(__be16);

	if (pos > ret)
		return -EINVAL;

	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->ccti_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}
static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);

/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t pos, size_t count)
{
	struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
	int ret;

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return -EINVAL;

	ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

	if (pos > ret)
		return -EINVAL;
	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->congestion_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}
static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);

static struct bin_attribute *port_ccmgta_attributes[] = {
	&bin_attr_cc_setting_bin,
	&bin_attr_cc_table_bin,
	NULL,
};

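/*
 * Hide the CCMgtA binary attributes entirely when congestion control is
 * disabled (qib_cc_table_size == 0) or the shadow tables were never
 * allocated for this port.
 */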
static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
				 struct bin_attribute *attr, int n)
{
	struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);

	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;
	return attr->attr.mode;
}

static const struct attribute_group port_ccmgta_attribute_group = {
	.name = "CCMgtA",
	.is_bin_visible = qib_ccmgta_is_bin_visible,
	.bin_attrs = port_ccmgta_attributes,
};

/* Start sl2vl */

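/*
 * One read-only attribute per service level (0-15), each reporting the
 * current SL -> VL mapping from the port's ibport_data.
 */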
struct qib_sl2vl_attr {
	struct ib_port_attribute attr;
	int sl;
};

static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct qib_sl2vl_attr *sattr =
		container_of(attr, struct qib_sl2vl_attr, attr);
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}

#define QIB_SL2VL_ATTR(N)                                                      \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = {                    \
		.attr = __ATTR(N, 0444, sl2vl_attr_show, NULL),                \
		.sl = N,                                                       \
	}

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

static struct attribute *port_sl2vl_attributes[] = {
	&qib_sl2vl_attr_0.attr.attr,
	&qib_sl2vl_attr_1.attr.attr,
	&qib_sl2vl_attr_2.attr.attr,
	&qib_sl2vl_attr_3.attr.attr,
	&qib_sl2vl_attr_4.attr.attr,
	&qib_sl2vl_attr_5.attr.attr,
	&qib_sl2vl_attr_6.attr.attr,
	&qib_sl2vl_attr_7.attr.attr,
	&qib_sl2vl_attr_8.attr.attr,
	&qib_sl2vl_attr_9.attr.attr,
	&qib_sl2vl_attr_10.attr.attr,
	&qib_sl2vl_attr_11.attr.attr,
	&qib_sl2vl_attr_12.attr.attr,
	&qib_sl2vl_attr_13.attr.attr,
	&qib_sl2vl_attr_14.attr.attr,
	&qib_sl2vl_attr_15.attr.attr,
	NULL
};

static const struct attribute_group port_sl2vl_group = {
	.name = "sl2vl",
	.attrs = port_sl2vl_attributes,
};

/* End sl2vl */

/* Start diag_counters */

struct qib_diagc_attr {
	struct ib_port_attribute attr;
	size_t counter;
};

static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter));
}

static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
				struct ib_port_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
	u64 val;
	int ret;

	ret = kstrtou64(buf, 0, &val);
	if (ret)
		return ret;
	*((u64 *)qibp + dattr->counter) = val;
	return count;
}

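/*
 * QIB_DIAGC_ATTR(N) exposes the rvp.n_<N> counter as a read/write file.
 * .counter is the field's offset in struct qib_ibport expressed in u64
 * units, so diagc_attr_show()/store() can address it as ((u64 *)qibp)[i];
 * the static_assert() guarantees the field really is a u64.
 */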
#define QIB_DIAGC_ATTR(N)                                                      \
	static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64));  \
	static struct qib_diagc_attr qib_diagc_attr_##N = {                    \
		.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store),    \
		.counter =                                                     \
			offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64)   \
	}

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
QIB_DIAGC_ATTR(rc_crwaits);

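/* Sum a per-cpu u64 counter over all possible CPUs. */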
static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}

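/*
 * Per-cpu counters cannot be written directly.  Writing 0 records the
 * current total in *zero, so subsequent reads (total - *zero) start from
 * zero again; any other value is rejected with an error message.
 */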
static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf,
				 size_t count, u64 *zero, u64 cur)
{
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;
	if (val != 0) {
		qib_dev_err(dd, "Per CPU cntrs can only be zeroed");
		return count;
	}
	*zero = cur;
	return count;
}

static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num,
			    struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return sysfs_emit(buf, "%llu\n",
			  get_all_cpu_total(qibp->rvp.rc_acks) -
				  qibp->rvp.z_rc_acks);
}

static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num,
			     struct ib_port_attribute *attr, const char *buf,
			     size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks,
				 get_all_cpu_total(qibp->rvp.rc_acks));
}
static IB_PORT_ATTR_RW(rc_acks);

static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num,
			     struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return sysfs_emit(buf, "%llu\n",
			  get_all_cpu_total(qibp->rvp.rc_qacks) -
				  qibp->rvp.z_rc_qacks);
}

static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num,
			      struct ib_port_attribute *attr, const char *buf,
			      size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks,
				 get_all_cpu_total(qibp->rvp.rc_qacks));
}
static IB_PORT_ATTR_RW(rc_qacks);

static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_attribute *attr, char *buf)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return sysfs_emit(buf, "%llu\n",
			 get_all_cpu_total(qibp->rvp.rc_delayed_comp) -
				 qibp->rvp.z_rc_delayed_comp);
}

static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num,
				     struct ib_port_attribute *attr,
				     const char *buf, size_t count)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;

	return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp,
				 get_all_cpu_total(qibp->rvp.rc_delayed_comp));
}
static IB_PORT_ATTR_RW(rc_delayed_comp);

static struct attribute *port_diagc_attributes[] = {
	&qib_diagc_attr_rc_resends.attr.attr,
	&qib_diagc_attr_seq_naks.attr.attr,
	&qib_diagc_attr_rdma_seq.attr.attr,
	&qib_diagc_attr_rnr_naks.attr.attr,
	&qib_diagc_attr_other_naks.attr.attr,
	&qib_diagc_attr_rc_timeouts.attr.attr,
	&qib_diagc_attr_loop_pkts.attr.attr,
	&qib_diagc_attr_pkt_drops.attr.attr,
	&qib_diagc_attr_dmawait.attr.attr,
	&qib_diagc_attr_unaligned.attr.attr,
	&qib_diagc_attr_rc_dupreq.attr.attr,
	&qib_diagc_attr_rc_seqnak.attr.attr,
	&qib_diagc_attr_rc_crwaits.attr.attr,
	&ib_port_attr_rc_acks.attr,
	&ib_port_attr_rc_qacks.attr,
	&ib_port_attr_rc_delayed_comp.attr,
	NULL
};

static const struct attribute_group port_diagc_group = {
	.name = "diag_counters",
	.attrs = port_diagc_attributes,
};

/* End diag_counters */

const struct attribute_group *qib_attr_port_groups[] = {
	&port_linkcontrol_group,
	&port_ccmgta_attribute_group,
	&port_sl2vl_group,
	&port_diagc_group,
	NULL,
};

/* end of per-port file structures and support code */

/*
 * Start of per-unit (or driver, in some cases, but replicated
 * per unit) functions (these get a device *)
 */
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);

	return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	if (!dd->boardname)
		return -EINVAL;
	return sysfs_emit(buf, "%s\n", dd->boardname);
}
static DEVICE_ATTR_RO(hca_type);
static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);

static ssize_t version_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", (char *)ib_qib_version);
}
static DEVICE_ATTR_RO(version);

static ssize_t boardversion_show(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", dd->boardversion);
}
static DEVICE_ATTR_RO(boardversion);

static ssize_t localbus_info_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* The string printed here is already newline-terminated. */
	return sysfs_emit(buf, "%s", dd->lbus_info);
}
static DEVICE_ATTR_RO(localbus_info);

static ssize_t nctxts_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of user ports (contexts) available. */
	/* The calculation below deals with a special case where
	 * cfgctxts is set to 1 on a single-port board. */
	return sysfs_emit(buf, "%u\n",
			  (dd->first_user_ctxt > dd->cfgctxts) ?
				  0 :
				  (dd->cfgctxts - dd->first_user_ctxt));
}
static DEVICE_ATTR_RO(nctxts);

static ssize_t nfreectxts_show(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);

	/* Return the number of free user ports (contexts) available. */
	return sysfs_emit(buf, "%u\n", dd->freectxts);
}
static DEVICE_ATTR_RO(nfreectxts);

static ssize_t serial_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
	int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);

	/* dd->serial need not be NUL-terminated; bound the read with %.*s */
	return sysfs_emit(buf, "%.*s\n", size, dd->serial);
}
static DEVICE_ATTR_RO(serial);

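/*
 * Writing the string "reset" resets the chip; this is only honoured while
 * a diag client has the device open (dd->diag_client is set), e.g.
 * (path is an example)  echo reset > /sys/class/infiniband/qib0/chip_reset
 */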
static ssize_t chip_reset_store(struct device *device,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;

	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
		ret = -EINVAL;
		goto bail;
	}

	ret = qib_reset_device(dd->unit);
bail:
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_WO(chip_reset);

/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 */
static ssize_t tempsense_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int i;
	u8 regvals[8];

	for (i = 0; i < 8; i++) {
		int ret;

		if (i == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, i);
		if (ret < 0)
			return ret;	/* return error on bad read */
		regvals[i] = ret;
	}
	return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
			  (signed char)regvals[0],
			  (signed char)regvals[1],
			  regvals[2],
			  regvals[3],
			  (signed char)regvals[5],
			  (signed char)regvals[7]);
}
static DEVICE_ATTR_RO(tempsense);

/*
 * end of per-unit (or driver, in some cases, but replicated
 * per unit) functions
 */

/* start of per-unit file structures and support code */
static struct attribute *qib_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_version.attr,
	&dev_attr_nctxts.attr,
	&dev_attr_nfreectxts.attr,
	&dev_attr_serial.attr,
	&dev_attr_boardversion.attr,
	&dev_attr_tempsense.attr,
	&dev_attr_localbus_info.attr,
	&dev_attr_chip_reset.attr,
	NULL,
};

const struct attribute_group qib_attr_group = {
	.attrs = qib_attributes,
};