• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2012 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <linux/ctype.h>
35 
36 #include "qib.h"
37 #include "qib_mad.h"
38 
39 /* start of per-port functions */
40 /*
41  * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
42  */
show_hrtbt_enb(struct qib_pportdata * ppd,char * buf)43 static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
44 {
45 	struct qib_devdata *dd = ppd->dd;
46 	int ret;
47 
48 	ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
49 	ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
50 	return ret;
51 }
52 
store_hrtbt_enb(struct qib_pportdata * ppd,const char * buf,size_t count)53 static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
54 			       size_t count)
55 {
56 	struct qib_devdata *dd = ppd->dd;
57 	int ret;
58 	u16 val;
59 
60 	ret = kstrtou16(buf, 0, &val);
61 	if (ret) {
62 		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
63 		return ret;
64 	}
65 
66 	/*
67 	 * Set the "intentional" heartbeat enable per either of
68 	 * "Enable" and "Auto", as these are normally set together.
69 	 * This bit is consulted when leaving loopback mode,
70 	 * because entering loopback mode overrides it and automatically
71 	 * disables heartbeat.
72 	 */
73 	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
74 	return ret < 0 ? ret : count;
75 }
76 
store_loopback(struct qib_pportdata * ppd,const char * buf,size_t count)77 static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
78 			      size_t count)
79 {
80 	struct qib_devdata *dd = ppd->dd;
81 	int ret = count, r;
82 
83 	r = dd->f_set_ib_loopback(ppd, buf);
84 	if (r < 0)
85 		ret = r;
86 
87 	return ret;
88 }
89 
store_led_override(struct qib_pportdata * ppd,const char * buf,size_t count)90 static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
91 				  size_t count)
92 {
93 	struct qib_devdata *dd = ppd->dd;
94 	int ret;
95 	u16 val;
96 
97 	ret = kstrtou16(buf, 0, &val);
98 	if (ret) {
99 		qib_dev_err(dd, "attempt to set invalid LED override\n");
100 		return ret;
101 	}
102 
103 	qib_set_led_override(ppd, val);
104 	return count;
105 }
106 
show_status(struct qib_pportdata * ppd,char * buf)107 static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
108 {
109 	ssize_t ret;
110 
111 	if (!ppd->statusp)
112 		ret = -EINVAL;
113 	else
114 		ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
115 				(unsigned long long) *(ppd->statusp));
116 	return ret;
117 }
118 
/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 *
 * Index == bit position in *ppd->statusp; empty strings are
 * reserved/unused bits that must stay in place.
 */
static const char * const qib_status_str[] = {
	"Initted",			/* bit 0 */
	"",
	"",
	"",
	"",
	"Present",			/* bit 5 */
	"IB_link_up",			/* bit 6 */
	"IB_configured",		/* bit 7 */
	"",
	"Fatal_Hardware_Error",		/* bit 9 */
	NULL,				/* sentinel for show_status_str() loop */
};
136 
/*
 * Render the port status word as a space-separated list of the names
 * (from qib_status_str[]) of the bits that are currently set.
 * Returns -EINVAL if the status page is not available.
 */
static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;	/* any: at least one name already emitted */
	u64 s;
	ssize_t ret;

	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}

	s = *(ppd->statusp);
	*buf = '\0';
	/* Walk bits LSB-first; stop early once no set bits remain. */
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);

	ret = strlen(buf);

bail:
	return ret;
}
170 
171 /* end of per-port functions */
172 
173 /*
174  * Start of per-port file structures and support code
175  * Because we are fitting into other infrastructure, we have to supply the
176  * full set of kobject/sysfs_ops structures and routines.
177  */
/*
 * Per-port sysfs attribute: a generic struct attribute paired with
 * port-aware show/store callbacks, dispatched by qib_portattr_show()
 * and qib_portattr_store() below.
 */
#define QIB_PORT_ATTR(name, mode, show, store) \
	static struct qib_port_attr qib_port_attr_##name = \
		__ATTR(name, mode, show, store)

struct qib_port_attr {
	struct attribute attr;
	ssize_t (*show)(struct qib_pportdata *, char *);
	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};

QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
	      store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);

/* Attributes created under the per-port "linkcontrol" kobject. */
static struct attribute *port_default_attributes[] = {
	&qib_port_attr_loopback.attr,
	&qib_port_attr_led_override.attr,
	&qib_port_attr_hrtbt_enable.attr,
	&qib_port_attr_status.attr,
	&qib_port_attr_status_str.attr,
	NULL
};
203 
204 /*
205  * Start of per-port congestion control structures and support code
206  */
207 
208 /*
209  * Congestion control table size followed by table entries
210  */
/*
 * Binary sysfs read of the congestion control table: a __be16 size
 * followed by the table entries, copied from the driver's shadow copy.
 * @pos/@count are clamped to the valid extent of the shadow data.
 */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	/* Nothing to read if CC is disabled or the shadow was never built. */
	if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
		return -EINVAL;

	/* Total valid bytes: entry array plus the leading __be16 count. */
	ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
		 + sizeof(__be16);

	if (pos > ret)
		return -EINVAL;

	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	/* Lock so a concurrent MAD update can't give us a torn snapshot. */
	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->ccti_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}
240 
/*
 * kobject release for all per-port kobjects: intentionally empty,
 * since the pportdata they are embedded in is freed by
 * qib_free_devdata(), not by the kobject core.
 */
static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}

static struct kobj_type qib_port_cc_ktype = {
	.release = qib_port_release,
};

/* Read-only binary file exposing the CC table under the CCMgtA kobject. */
static const struct bin_attribute cc_table_bin_attr = {
	.attr = {.name = "cc_table_bin", .mode = 0444},
	.read = read_cc_table_bin,
	.size = PAGE_SIZE,
};
255 
256 /*
257  * Congestion settings: port control, control map and an array of 16
258  * entries for the congestion entries - increase, timer, event log
259  * trigger threshold and the minimum injection rate delay.
260  */
/*
 * Binary sysfs read of the congestion settings shadow: port control,
 * control map and the 16 congestion entries (increase, timer, event log
 * trigger threshold, minimum injection rate delay).
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);

	/* Nothing to read if CC is disabled or the shadow was never built. */
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return -EINVAL;

	/* Valid extent is exactly the shadow structure. */
	ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);

	if (pos > ret)
		return -EINVAL;
	if (count > ret - pos)
		count = ret - pos;

	if (!count)
		return count;

	/* Lock so a concurrent MAD update can't give us a torn snapshot. */
	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->congestion_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);

	return count;
}

/* Read-only binary file exposing CC settings under the CCMgtA kobject. */
static const struct bin_attribute cc_setting_bin_attr = {
	.attr = {.name = "cc_settings_bin", .mode = 0444},
	.read = read_cc_setting_bin,
	.size = PAGE_SIZE,
};
294 
295 
qib_portattr_show(struct kobject * kobj,struct attribute * attr,char * buf)296 static ssize_t qib_portattr_show(struct kobject *kobj,
297 	struct attribute *attr, char *buf)
298 {
299 	struct qib_port_attr *pattr =
300 		container_of(attr, struct qib_port_attr, attr);
301 	struct qib_pportdata *ppd =
302 		container_of(kobj, struct qib_pportdata, pport_kobj);
303 
304 	if (!pattr->show)
305 		return -EIO;
306 
307 	return pattr->show(ppd, buf);
308 }
309 
qib_portattr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t len)310 static ssize_t qib_portattr_store(struct kobject *kobj,
311 	struct attribute *attr, const char *buf, size_t len)
312 {
313 	struct qib_port_attr *pattr =
314 		container_of(attr, struct qib_port_attr, attr);
315 	struct qib_pportdata *ppd =
316 		container_of(kobj, struct qib_pportdata, pport_kobj);
317 
318 	if (!pattr->store)
319 		return -EIO;
320 
321 	return pattr->store(ppd, buf, len);
322 }
323 
324 
/* sysfs ops routing all linkcontrol attribute I/O through the dispatchers. */
static const struct sysfs_ops qib_port_ops = {
	.show = qib_portattr_show,
	.store = qib_portattr_store,
};

/* kobj_type for the per-port "linkcontrol" directory. */
static struct kobj_type qib_port_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_port_ops,
	.default_attrs = port_default_attributes
};
335 
336 /* Start sl2vl */
337 
/*
 * One read-only attribute per service level (0..15); the file name is
 * the SL number and the stored .sl selects the sl_to_vl[] entry to show.
 */
#define QIB_SL2VL_ATTR(N) \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0444 }, \
		.sl = N \
	}

struct qib_sl2vl_attr {
	struct attribute attr;
	int sl;	/* service level this attribute reports */
};

QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);

/* Attributes created under the per-port "sl2vl" kobject. */
static struct attribute *sl2vl_default_attributes[] = {
	&qib_sl2vl_attr_0.attr,
	&qib_sl2vl_attr_1.attr,
	&qib_sl2vl_attr_2.attr,
	&qib_sl2vl_attr_3.attr,
	&qib_sl2vl_attr_4.attr,
	&qib_sl2vl_attr_5.attr,
	&qib_sl2vl_attr_6.attr,
	&qib_sl2vl_attr_7.attr,
	&qib_sl2vl_attr_8.attr,
	&qib_sl2vl_attr_9.attr,
	&qib_sl2vl_attr_10.attr,
	&qib_sl2vl_attr_11.attr,
	&qib_sl2vl_attr_12.attr,
	&qib_sl2vl_attr_13.attr,
	&qib_sl2vl_attr_14.attr,
	&qib_sl2vl_attr_15.attr,
	NULL
};
385 
sl2vl_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)386 static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
387 			       char *buf)
388 {
389 	struct qib_sl2vl_attr *sattr =
390 		container_of(attr, struct qib_sl2vl_attr, attr);
391 	struct qib_pportdata *ppd =
392 		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
393 	struct qib_ibport *qibp = &ppd->ibport_data;
394 
395 	return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
396 }
397 
/* sl2vl attributes are read-only, so only .show is provided. */
static const struct sysfs_ops qib_sl2vl_ops = {
	.show = sl2vl_attr_show,
};

/* kobj_type for the per-port "sl2vl" directory. */
static struct kobj_type qib_sl2vl_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_sl2vl_ops,
	.default_attrs = sl2vl_default_attributes
};
407 
408 /* End sl2vl */
409 
410 /* Start diag_counters */
411 
/*
 * Diag counter attribute for an ordinary (non-per-CPU) counter:
 * .counter is the byte offset of rvp.n_<N> within struct qib_ibport,
 * dereferenced directly by diagc_attr_show()/diagc_attr_store().
 */
#define QIB_DIAGC_ATTR(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.n_##N) \
	}

/*
 * Diag counter attribute for a per-CPU counter: .counter points at the
 * z_<N> zero-point shadow; the handlers special-case these by name.
 */
#define QIB_DIAGC_ATTR_PER_CPU(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, rvp.z_##N) \
	}

struct qib_diagc_attr {
	struct attribute attr;
	size_t counter;	/* offset of the counter field in qib_ibport */
};
428 
/* Per-CPU counters, handled specially by name in the show/store paths. */
QIB_DIAGC_ATTR_PER_CPU(rc_acks);
QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);

QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);

/* Attributes created under the per-port "diag_counters" kobject. */
static struct attribute *diagc_default_attributes[] = {
	&qib_diagc_attr_rc_resends.attr,
	&qib_diagc_attr_rc_acks.attr,
	&qib_diagc_attr_rc_qacks.attr,
	&qib_diagc_attr_rc_delayed_comp.attr,
	&qib_diagc_attr_seq_naks.attr,
	&qib_diagc_attr_rdma_seq.attr,
	&qib_diagc_attr_rnr_naks.attr,
	&qib_diagc_attr_other_naks.attr,
	&qib_diagc_attr_rc_timeouts.attr,
	&qib_diagc_attr_loop_pkts.attr,
	&qib_diagc_attr_pkt_drops.attr,
	&qib_diagc_attr_dmawait.attr,
	&qib_diagc_attr_unaligned.attr,
	&qib_diagc_attr_rc_dupreq.attr,
	&qib_diagc_attr_rc_seqnak.attr,
	NULL
};
464 
/*
 * Sum a per-CPU u64 counter across every possible CPU and return the
 * running total (monotonic; callers subtract a zero-point shadow to
 * present a "since last reset" value).
 */
static u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
474 
475 #define def_write_per_cpu(cntr) \
476 static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data)	\
477 {									\
478 	struct qib_devdata *dd = ppd->dd;				\
479 	struct qib_ibport *qibp = &ppd->ibport_data;			\
480 	/*  A write can only zero the counter */			\
481 	if (data == 0)							\
482 		qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
483 	else								\
484 		qib_dev_err(dd, "Per CPU cntrs can only be zeroed");	\
485 }
486 
487 def_write_per_cpu(rc_acks)
def_write_per_cpu(rc_qacks)488 def_write_per_cpu(rc_qacks)
489 def_write_per_cpu(rc_delayed_comp)
490 
491 #define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
492 							qibp->rvp.z_##cntr)
493 
/*
 * Show a diag counter.  The three per-CPU counters are matched by name
 * and summed via READ_PER_CPU_CNTR (which expands using the local
 * "qibp"); all others are read as a u32 at the stored struct offset.
 */
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;

	if (!strncmp(dattr->attr.name, "rc_acks", 7))
		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
		return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
		return sprintf(buf, "%llu\n",
					READ_PER_CPU_CNTR(rc_delayed_comp));
	else
		return sprintf(buf, "%u\n",
				*(u32 *)((char *)qibp + dattr->counter));
}
514 
diagc_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t size)515 static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
516 				const char *buf, size_t size)
517 {
518 	struct qib_diagc_attr *dattr =
519 		container_of(attr, struct qib_diagc_attr, attr);
520 	struct qib_pportdata *ppd =
521 		container_of(kobj, struct qib_pportdata, diagc_kobj);
522 	struct qib_ibport *qibp = &ppd->ibport_data;
523 	u32 val;
524 	int ret;
525 
526 	ret = kstrtou32(buf, 0, &val);
527 	if (ret)
528 		return ret;
529 
530 	if (!strncmp(dattr->attr.name, "rc_acks", 7))
531 		write_per_cpu_rc_acks(ppd, val);
532 	else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
533 		write_per_cpu_rc_qacks(ppd, val);
534 	else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
535 		write_per_cpu_rc_delayed_comp(ppd, val);
536 	else
537 		*(u32 *)((char *)qibp + dattr->counter) = val;
538 	return size;
539 }
540 
/* sysfs ops routing diag_counters attribute I/O through the dispatchers. */
static const struct sysfs_ops qib_diagc_ops = {
	.show = diagc_attr_show,
	.store = diagc_attr_store,
};

/* kobj_type for the per-port "diag_counters" directory. */
static struct kobj_type qib_diagc_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_diagc_ops,
	.default_attrs = diagc_default_attributes
};
551 
552 /* End diag_counters */
553 
554 /* end of per-port file structures and support code */
555 
556 /*
557  * Start of per-unit (or driver, in some cases, but replicated
558  * per unit) functions (these get a device *)
559  */
show_rev(struct device * device,struct device_attribute * attr,char * buf)560 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
561 			char *buf)
562 {
563 	struct qib_ibdev *dev =
564 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
565 
566 	return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
567 }
568 
show_hca(struct device * device,struct device_attribute * attr,char * buf)569 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
570 			char *buf)
571 {
572 	struct qib_ibdev *dev =
573 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
574 	struct qib_devdata *dd = dd_from_dev(dev);
575 	int ret;
576 
577 	if (!dd->boardname)
578 		ret = -EINVAL;
579 	else
580 		ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
581 	return ret;
582 }
583 
show_version(struct device * device,struct device_attribute * attr,char * buf)584 static ssize_t show_version(struct device *device,
585 			    struct device_attribute *attr, char *buf)
586 {
587 	/* The string printed here is already newline-terminated. */
588 	return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
589 }
590 
show_boardversion(struct device * device,struct device_attribute * attr,char * buf)591 static ssize_t show_boardversion(struct device *device,
592 				 struct device_attribute *attr, char *buf)
593 {
594 	struct qib_ibdev *dev =
595 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
596 	struct qib_devdata *dd = dd_from_dev(dev);
597 
598 	/* The string printed here is already newline-terminated. */
599 	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
600 }
601 
602 
show_localbus_info(struct device * device,struct device_attribute * attr,char * buf)603 static ssize_t show_localbus_info(struct device *device,
604 				  struct device_attribute *attr, char *buf)
605 {
606 	struct qib_ibdev *dev =
607 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
608 	struct qib_devdata *dd = dd_from_dev(dev);
609 
610 	/* The string printed here is already newline-terminated. */
611 	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
612 }
613 
614 
show_nctxts(struct device * device,struct device_attribute * attr,char * buf)615 static ssize_t show_nctxts(struct device *device,
616 			   struct device_attribute *attr, char *buf)
617 {
618 	struct qib_ibdev *dev =
619 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
620 	struct qib_devdata *dd = dd_from_dev(dev);
621 
622 	/* Return the number of user ports (contexts) available. */
623 	/* The calculation below deals with a special case where
624 	 * cfgctxts is set to 1 on a single-port board. */
625 	return scnprintf(buf, PAGE_SIZE, "%u\n",
626 			(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
627 			(dd->cfgctxts - dd->first_user_ctxt));
628 }
629 
show_nfreectxts(struct device * device,struct device_attribute * attr,char * buf)630 static ssize_t show_nfreectxts(struct device *device,
631 			   struct device_attribute *attr, char *buf)
632 {
633 	struct qib_ibdev *dev =
634 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
635 	struct qib_devdata *dd = dd_from_dev(dev);
636 
637 	/* Return the number of free user ports (contexts) available. */
638 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
639 }
640 
/*
 * Report the adapter serial number followed by a newline.
 * dd->serial is a fixed-size char array that may not be
 * NUL-terminated, so a terminator is planted one past the copy
 * before strcat appends the newline at the first NUL.
 */
static ssize_t show_serial(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	buf[sizeof(dd->serial)] = '\0';
	memcpy(buf, dd->serial, sizeof(dd->serial));
	strcat(buf, "\n");
	return strlen(buf);
}
653 
store_chip_reset(struct device * device,struct device_attribute * attr,const char * buf,size_t count)654 static ssize_t store_chip_reset(struct device *device,
655 				struct device_attribute *attr, const char *buf,
656 				size_t count)
657 {
658 	struct qib_ibdev *dev =
659 		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
660 	struct qib_devdata *dd = dd_from_dev(dev);
661 	int ret;
662 
663 	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
664 		ret = -EINVAL;
665 		goto bail;
666 	}
667 
668 	ret = qib_reset_device(dd->unit);
669 bail:
670 	return ret < 0 ? ret : count;
671 }
672 
673 /*
674  * Dump tempsense regs. in decimal, to ease shell-scripts.
675  */
/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 * Reads registers 0-7 (skipping index 6, which is not reported);
 * returns -ENXIO or the chip read's error if any read fails, i.e.
 * output is produced only when all reads succeed (idx reaches 8).
 */
static ssize_t show_tempsense(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, rdi.ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	int idx;
	u8 regvals[8];

	ret = -ENXIO;
	for (idx = 0; idx < 8; ++idx) {
		if (idx == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, idx);
		if (ret < 0)
			break;
		regvals[idx] = ret;
	}
	/* idx == 8 means every read above succeeded. */
	if (idx == 8)
		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
				*(signed char *)(regvals),
				*(signed char *)(regvals + 1),
				regvals[2], regvals[3],
				*(signed char *)(regvals + 5),
				*(signed char *)(regvals + 7));
	return ret;
}
704 
705 /*
706  * end of per-unit (or driver, in some cases, but replicated
707  * per unit) functions
708  */
709 
710 /* start of per-unit file structures and support code */
/* Device attributes created under /sys/class/infiniband/<dev>/. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
/* board_id intentionally shares show_hca with hca_type. */
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);

/* Registered/unregistered in qib_verbs_{register,unregister}_sysfs(). */
static struct device_attribute *qib_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_version,
	&dev_attr_nctxts,
	&dev_attr_nfreectxts,
	&dev_attr_serial,
	&dev_attr_boardversion,
	&dev_attr_tempsense,
	&dev_attr_localbus_info,
	&dev_attr_chip_reset,
};
736 
/*
 * Create the per-port sysfs directories (linkcontrol, sl2vl,
 * diag_counters and, when congestion control is active, CCMgtA with
 * its two binary files) under the IB core's per-port kobject.
 * On any failure, everything created so far is unwound via the
 * goto chain.  Returns 0 on success or a negative errno.
 */
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	/* port_num is 1-based; reject 0 and out-of-range ports. */
	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_link;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	/* CC directory is optional: only when CC is enabled and shadowed. */
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;

	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_diagc;
	}

	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc;
	}

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc_entry_bin;
	}

	qib_devinfo(dd->pcidev,
		"IB%u: Congestion Control Agent enabled for port %d\n",
		dd->unit, port_num);

	return 0;

/* Unwind in reverse creation order. */
bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}
834 
835 /*
836  * Register and create our files in /sys/class/infiniband.
837  */
qib_verbs_register_sysfs(struct qib_devdata * dd)838 int qib_verbs_register_sysfs(struct qib_devdata *dd)
839 {
840 	struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
841 	int i, ret;
842 
843 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
844 		ret = device_create_file(&dev->dev, qib_attributes[i]);
845 		if (ret)
846 			goto bail;
847 	}
848 
849 	return 0;
850 bail:
851 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
852 		device_remove_file(&dev->dev, qib_attributes[i]);
853 	return ret;
854 }
855 
856 /*
857  * Unregister and remove our files in /sys/class/infiniband.
858  */
qib_verbs_unregister_sysfs(struct qib_devdata * dd)859 void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
860 {
861 	struct qib_pportdata *ppd;
862 	int i;
863 
864 	for (i = 0; i < dd->num_pports; i++) {
865 		ppd = &dd->pport[i];
866 		if (qib_cc_table_size &&
867 			ppd->congestion_entries_shadow) {
868 			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
869 				&cc_setting_bin_attr);
870 			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
871 				&cc_table_bin_attr);
872 			kobject_put(&ppd->pport_cc_kobj);
873 		}
874 		kobject_put(&ppd->sl2vl_kobj);
875 		kobject_put(&ppd->pport_kobj);
876 	}
877 }
878