1 /*
2  * Greybus interface code
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9 
10 #include <linux/delay.h>
11 
12 #include "greybus.h"
13 #include "greybus_trace.h"
14 
15 #define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000
16 
17 #define GB_INTERFACE_DEVICE_ID_BAD	0xff
18 
19 #define GB_INTERFACE_AUTOSUSPEND_MS			3000
20 
21 /* Time required for interface to enter standby before disabling REFCLK */
22 #define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS			20
23 
24 /* Don't-care selector index */
25 #define DME_SELECTOR_INDEX_NULL		0
26 
27 /* DME attributes */
28 /* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
29 #define DME_T_TST_SRC_INCREMENT		0x4083
30 
31 #define DME_DDBL1_MANUFACTURERID	0x5003
32 #define DME_DDBL1_PRODUCTID		0x5004
33 
34 #define DME_TOSHIBA_GMP_VID		0x6000
35 #define DME_TOSHIBA_GMP_PID		0x6001
36 #define DME_TOSHIBA_GMP_SN0		0x6002
37 #define DME_TOSHIBA_GMP_SN1		0x6003
38 #define DME_TOSHIBA_GMP_INIT_STATUS	0x6101
39 
40 /* DDBL1 Manufacturer and Product ids */
41 #define TOSHIBA_DMID			0x0126
42 #define TOSHIBA_ES2_BRIDGE_DPID		0x1000
43 #define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
44 #define TOSHIBA_ES3_GBPHY_DPID		0x1002
45 
46 static int gb_interface_hibernate_link(struct gb_interface *intf);
47 static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
48 
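/* Read a single DME attribute of the interface through the SVC. */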
49 static int gb_interface_dme_attr_get(struct gb_interface *intf,
50 							u16 attr, u32 *val)
51 {
52 	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
53 					attr, DME_SELECTOR_INDEX_NULL, val);
54 }
55 
56 static int gb_interface_read_ara_dme(struct gb_interface *intf)
57 {
58 	u32 sn0, sn1;
59 	int ret;
60 
61 	/*
62 	 * Unless this is a Toshiba bridge, bail out until we have defined
63 	 * standard GMP attributes.
64 	 */
65 	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
66 		dev_err(&intf->dev, "unknown manufacturer %08x\n",
67 				intf->ddbl1_manufacturer_id);
68 		return -ENODEV;
69 	}
70 
71 	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
72 					&intf->vendor_id);
73 	if (ret)
74 		return ret;
75 
76 	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
77 					&intf->product_id);
78 	if (ret)
79 		return ret;
80 
81 	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
82 	if (ret)
83 		return ret;
84 
85 	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
86 	if (ret)
87 		return ret;
88 
89 	intf->serial_number = (u64)sn1 << 32 | sn0;
90 
91 	return 0;
92 }
93 
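/*
 * Read and cache the DDBL1 ids and, for known bridges, the GMP vendor and
 * product ids and the serial number. Only done once per interface.
 */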
94 static int gb_interface_read_dme(struct gb_interface *intf)
95 {
96 	int ret;
97 
98 	/* DME attributes have already been read */
99 	if (intf->dme_read)
100 		return 0;
101 
102 	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
103 					&intf->ddbl1_manufacturer_id);
104 	if (ret)
105 		return ret;
106 
107 	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
108 					&intf->ddbl1_product_id);
109 	if (ret)
110 		return ret;
111 
112 	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
113 			intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
114 		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
115 		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
116 	}
117 
118 	ret = gb_interface_read_ara_dme(intf);
119 	if (ret)
120 		return ret;
121 
122 	intf->dme_read = true;
123 
124 	return 0;
125 }
126 
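/*
 * Allocate a device id for the interface and ask the SVC to set up a route
 * between the AP and the interface.
 */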
127 static int gb_interface_route_create(struct gb_interface *intf)
128 {
129 	struct gb_svc *svc = intf->hd->svc;
130 	u8 intf_id = intf->interface_id;
131 	u8 device_id;
132 	int ret;
133 
134 	/* Allocate an interface device id. */
135 	ret = ida_simple_get(&svc->device_id_map,
136 			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
137 			     GFP_KERNEL);
138 	if (ret < 0) {
139 		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
140 		return ret;
141 	}
142 	device_id = ret;
143 
144 	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
145 	if (ret) {
146 		dev_err(&intf->dev, "failed to set device id %u: %d\n",
147 				device_id, ret);
148 		goto err_ida_remove;
149 	}
150 
151 	/* FIXME: Hard-coded AP device id. */
152 	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
153 				  intf_id, device_id);
154 	if (ret) {
155 		dev_err(&intf->dev, "failed to create route: %d\n", ret);
156 		goto err_svc_id_free;
157 	}
158 
159 	intf->device_id = device_id;
160 
161 	return 0;
162 
163 err_svc_id_free:
164 	/*
165 	 * XXX Should we tell the SVC that this device id no longer
166 	 * XXX belongs to the interface?
167 	 */
168 err_ida_remove:
169 	ida_simple_remove(&svc->device_id_map, device_id);
170 
171 	return ret;
172 }
173 
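/* Tear down the route to the AP and release the interface device id. */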
174 static void gb_interface_route_destroy(struct gb_interface *intf)
175 {
176 	struct gb_svc *svc = intf->hd->svc;
177 
178 	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
179 		return;
180 
181 	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
182 	ida_simple_remove(&svc->device_id_map, intf->device_id);
183 	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
184 }
185 
186 /* Locking: Caller holds the interface mutex. */
187 static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
188 {
189 	int ret;
190 
191 	dev_info(&intf->dev, "legacy mode switch detected\n");
192 
193 	/* Mark as disconnected to prevent I/O during disable. */
194 	intf->disconnected = true;
195 	gb_interface_disable(intf);
196 	intf->disconnected = false;
197 
198 	ret = gb_interface_enable(intf);
199 	if (ret) {
200 		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
201 		gb_interface_deactivate(intf);
202 	}
203 
204 	return ret;
205 }
206 
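/*
 * Handle a mailbox event reported by the SVC. UniPro errors, unexpected
 * mailbox values and unsolicited events disable and deactivate the
 * interface; otherwise the event triggers a legacy mode switch or completes
 * a pending one.
 */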
207 void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
208 								u32 mailbox)
209 {
210 	mutex_lock(&intf->mutex);
211 
212 	if (result) {
213 		dev_warn(&intf->dev,
214 				"mailbox event with UniPro error: 0x%04x\n",
215 				result);
216 		goto err_disable;
217 	}
218 
219 	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
220 		dev_warn(&intf->dev,
221 				"mailbox event with unexpected value: 0x%08x\n",
222 				mailbox);
223 		goto err_disable;
224 	}
225 
226 	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
227 		gb_interface_legacy_mode_switch(intf);
228 		goto out_unlock;
229 	}
230 
231 	if (!intf->mode_switch) {
232 		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
233 				mailbox);
234 		goto err_disable;
235 	}
236 
237 	dev_info(&intf->dev, "mode switch detected\n");
238 
239 	complete(&intf->mode_switch_completion);
240 
241 out_unlock:
242 	mutex_unlock(&intf->mutex);
243 
244 	return;
245 
246 err_disable:
247 	gb_interface_disable(intf);
248 	gb_interface_deactivate(intf);
249 	mutex_unlock(&intf->mutex);
250 }
251 
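/*
 * Mode-switch work item: disable the interface, wait for the mailbox
 * completion (up to GB_INTERFACE_MODE_SWITCH_TIMEOUT ms) and then re-enable
 * or deactivate the interface.
 */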
252 static void gb_interface_mode_switch_work(struct work_struct *work)
253 {
254 	struct gb_interface *intf;
255 	struct gb_control *control;
256 	unsigned long timeout;
257 	int ret;
258 
259 	intf = container_of(work, struct gb_interface, mode_switch_work);
260 
261 	mutex_lock(&intf->mutex);
262 	/* Make sure interface is still enabled. */
263 	if (!intf->enabled) {
264 		dev_dbg(&intf->dev, "mode switch aborted\n");
265 		intf->mode_switch = false;
266 		mutex_unlock(&intf->mutex);
267 		goto out_interface_put;
268 	}
269 
270 	/*
271 	 * Prepare the control device for mode switch and make sure to get an
272 	 * extra reference before it goes away during interface disable.
273 	 */
274 	control = gb_control_get(intf->control);
275 	gb_control_mode_switch_prepare(control);
276 	gb_interface_disable(intf);
277 	mutex_unlock(&intf->mutex);
278 
279 	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
280 	ret = wait_for_completion_interruptible_timeout(
281 			&intf->mode_switch_completion, timeout);
282 
283 	/* Finalise control-connection mode switch. */
284 	gb_control_mode_switch_complete(control);
285 	gb_control_put(control);
286 
287 	if (ret < 0) {
288 		dev_err(&intf->dev, "mode switch interrupted\n");
289 		goto err_deactivate;
290 	} else if (ret == 0) {
291 		dev_err(&intf->dev, "mode switch timed out\n");
292 		goto err_deactivate;
293 	}
294 
295 	/* Re-enable (re-enumerate) interface if still active. */
296 	mutex_lock(&intf->mutex);
297 	intf->mode_switch = false;
298 	if (intf->active) {
299 		ret = gb_interface_enable(intf);
300 		if (ret) {
301 			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
302 					ret);
303 			gb_interface_deactivate(intf);
304 		}
305 	}
306 	mutex_unlock(&intf->mutex);
307 
308 out_interface_put:
309 	gb_interface_put(intf);
310 
311 	return;
312 
313 err_deactivate:
314 	mutex_lock(&intf->mutex);
315 	intf->mode_switch = false;
316 	gb_interface_deactivate(intf);
317 	mutex_unlock(&intf->mutex);
318 
319 	gb_interface_put(intf);
320 }
321 
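/*
 * Request a mode switch for an interface. Takes a reference to the
 * interface device and queues the mode-switch work; returns -EBUSY if a
 * mode switch is already in progress.
 */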
322 int gb_interface_request_mode_switch(struct gb_interface *intf)
323 {
324 	int ret = 0;
325 
326 	mutex_lock(&intf->mutex);
327 	if (intf->mode_switch) {
328 		ret = -EBUSY;
329 		goto out_unlock;
330 	}
331 
332 	intf->mode_switch = true;
333 	reinit_completion(&intf->mode_switch_completion);
334 
335 	/*
336 	 * Get a reference to the interface device, which will be put once the
337 	 * mode switch is complete.
338 	 */
339 	get_device(&intf->dev);
340 
341 	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
342 		put_device(&intf->dev);
343 		ret = -EBUSY;
344 		goto out_unlock;
345 	}
346 
347 out_unlock:
348 	mutex_unlock(&intf->mutex);
349 
350 	return ret;
351 }
352 EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
353 
354 /*
355  * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
356  * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP must clear
357  * it after reading a non-zero value from it.
358  *
359  * FIXME: This is module-hardware dependent and needs to be extended for every
360  * type of module we want to support.
361  */
362 static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
363 {
364 	struct gb_host_device *hd = intf->hd;
365 	unsigned long bootrom_quirks;
366 	unsigned long s2l_quirks;
367 	int ret;
368 	u32 value;
369 	u16 attr;
370 	u8 init_status;
371 
372 	/*
373 	 * ES2 bridges use T_TstSrcIncrement for the init status.
374 	 *
375 	 * FIXME: Remove ES2 support
376 	 */
377 	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
378 		attr = DME_T_TST_SRC_INCREMENT;
379 	else
380 		attr = DME_TOSHIBA_GMP_INIT_STATUS;
381 
382 	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
383 				  DME_SELECTOR_INDEX_NULL, &value);
384 	if (ret)
385 		return ret;
386 
387 	/*
388 	 * A nonzero init status indicates the module has finished
389 	 * initializing.
390 	 */
391 	if (!value) {
392 		dev_err(&intf->dev, "invalid init status\n");
393 		return -ENODEV;
394 	}
395 
396 	/*
397 	 * Extract the init status.
398 	 *
399 	 * For ES2: check the lowest 8 bits of 'value'.
400 	 * For ES3: check the highest 8 bits of the 32-bit 'value'.
401 	 *
402 	 * FIXME: Remove ES2 support
403 	 */
404 	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
405 		init_status = value & 0xff;
406 	else
407 		init_status = value >> 24;
408 
409 	/*
410 	 * Check if the interface is executing the quirky ES3 bootrom that,
411 	 * for example, requires E2EFC, CSD and CSV to be disabled.
412 	 */
413 	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
414 				GB_INTERFACE_QUIRK_FORCED_DISABLE |
415 				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
416 				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
417 
418 	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
419 
420 	switch (init_status) {
421 	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
422 	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
423 		intf->quirks |= bootrom_quirks;
424 		break;
425 	case GB_INIT_S2_LOADER_BOOT_STARTED:
426 		/* S2 Loader doesn't support runtime PM */
427 		intf->quirks &= ~bootrom_quirks;
428 		intf->quirks |= s2l_quirks;
429 		break;
430 	default:
431 		intf->quirks &= ~bootrom_quirks;
432 		intf->quirks &= ~s2l_quirks;
433 	}
434 
435 	/* Clear the init status. */
436 	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
437 				   DME_SELECTOR_INDEX_NULL, 0);
438 }
439 
440 /* interface sysfs attributes */
441 #define gb_interface_attr(field, type)					\
442 static ssize_t field##_show(struct device *dev,				\
443 			    struct device_attribute *attr,		\
444 			    char *buf)					\
445 {									\
446 	struct gb_interface *intf = to_gb_interface(dev);		\
447 	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
448 }									\
449 static DEVICE_ATTR_RO(field)
450 
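/*
 * For example, gb_interface_attr(interface_id, "%u") expands to an
 * interface_id_show() routine printing intf->interface_id as "%u\n" and a
 * matching read-only dev_attr_interface_id device attribute.
 */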
451 gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
452 gb_interface_attr(ddbl1_product_id, "0x%08x");
453 gb_interface_attr(interface_id, "%u");
454 gb_interface_attr(vendor_id, "0x%08x");
455 gb_interface_attr(product_id, "0x%08x");
456 gb_interface_attr(serial_number, "0x%016llx");
457 
458 static ssize_t voltage_now_show(struct device *dev,
459 				struct device_attribute *attr, char *buf)
460 {
461 	struct gb_interface *intf = to_gb_interface(dev);
462 	int ret;
463 	u32 measurement;
464 
465 	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
466 					    GB_SVC_PWRMON_TYPE_VOL,
467 					    &measurement);
468 	if (ret) {
469 		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
470 		return ret;
471 	}
472 
473 	return sprintf(buf, "%u\n", measurement);
474 }
475 static DEVICE_ATTR_RO(voltage_now);
476 
477 static ssize_t current_now_show(struct device *dev,
478 				struct device_attribute *attr, char *buf)
479 {
480 	struct gb_interface *intf = to_gb_interface(dev);
481 	int ret;
482 	u32 measurement;
483 
484 	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
485 					    GB_SVC_PWRMON_TYPE_CURR,
486 					    &measurement);
487 	if (ret) {
488 		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
489 		return ret;
490 	}
491 
492 	return sprintf(buf, "%u\n", measurement);
493 }
494 static DEVICE_ATTR_RO(current_now);
495 
496 static ssize_t power_now_show(struct device *dev,
497 			      struct device_attribute *attr, char *buf)
498 {
499 	struct gb_interface *intf = to_gb_interface(dev);
500 	int ret;
501 	u32 measurement;
502 
503 	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
504 					    GB_SVC_PWRMON_TYPE_PWR,
505 					    &measurement);
506 	if (ret) {
507 		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
508 		return ret;
509 	}
510 
511 	return sprintf(buf, "%u\n", measurement);
512 }
513 static DEVICE_ATTR_RO(power_now);
514 
515 static ssize_t power_state_show(struct device *dev,
516 				struct device_attribute *attr, char *buf)
517 {
518 	struct gb_interface *intf = to_gb_interface(dev);
519 
520 	if (intf->active)
521 		return scnprintf(buf, PAGE_SIZE, "on\n");
522 	else
523 		return scnprintf(buf, PAGE_SIZE, "off\n");
524 }
525 
526 static ssize_t power_state_store(struct device *dev,
527 				 struct device_attribute *attr, const char *buf,
528 				 size_t len)
529 {
530 	struct gb_interface *intf = to_gb_interface(dev);
531 	bool activate;
532 	int ret = 0;
533 
534 	if (kstrtobool(buf, &activate))
535 		return -EINVAL;
536 
537 	mutex_lock(&intf->mutex);
538 
539 	if (activate == intf->active)
540 		goto unlock;
541 
542 	if (activate) {
543 		ret = gb_interface_activate(intf);
544 		if (ret) {
545 			dev_err(&intf->dev,
546 				"failed to activate interface: %d\n", ret);
547 			goto unlock;
548 		}
549 
550 		ret = gb_interface_enable(intf);
551 		if (ret) {
552 			dev_err(&intf->dev,
553 				"failed to enable interface: %d\n", ret);
554 			gb_interface_deactivate(intf);
555 			goto unlock;
556 		}
557 	} else {
558 		gb_interface_disable(intf);
559 		gb_interface_deactivate(intf);
560 	}
561 
562 unlock:
563 	mutex_unlock(&intf->mutex);
564 
565 	if (ret)
566 		return ret;
567 
568 	return len;
569 }
570 static DEVICE_ATTR_RW(power_state);
571 
572 static const char *gb_interface_type_string(struct gb_interface *intf)
573 {
574 	static const char * const types[] = {
575 		[GB_INTERFACE_TYPE_INVALID] = "invalid",
576 		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
577 		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
578 		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
579 		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
580 	};
581 
582 	return types[intf->type];
583 }
584 
585 static ssize_t interface_type_show(struct device *dev,
586 				   struct device_attribute *attr, char *buf)
587 {
588 	struct gb_interface *intf = to_gb_interface(dev);
589 
590 	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
591 }
592 static DEVICE_ATTR_RO(interface_type);
593 
594 static struct attribute *interface_unipro_attrs[] = {
595 	&dev_attr_ddbl1_manufacturer_id.attr,
596 	&dev_attr_ddbl1_product_id.attr,
597 	NULL
598 };
599 
600 static struct attribute *interface_greybus_attrs[] = {
601 	&dev_attr_vendor_id.attr,
602 	&dev_attr_product_id.attr,
603 	&dev_attr_serial_number.attr,
604 	NULL
605 };
606 
607 static struct attribute *interface_power_attrs[] = {
608 	&dev_attr_voltage_now.attr,
609 	&dev_attr_current_now.attr,
610 	&dev_attr_power_now.attr,
611 	&dev_attr_power_state.attr,
612 	NULL
613 };
614 
615 static struct attribute *interface_common_attrs[] = {
616 	&dev_attr_interface_id.attr,
617 	&dev_attr_interface_type.attr,
618 	NULL
619 };
620 
621 static umode_t interface_unipro_is_visible(struct kobject *kobj,
622 						struct attribute *attr, int n)
623 {
624 	struct device *dev = container_of(kobj, struct device, kobj);
625 	struct gb_interface *intf = to_gb_interface(dev);
626 
627 	switch (intf->type) {
628 	case GB_INTERFACE_TYPE_UNIPRO:
629 	case GB_INTERFACE_TYPE_GREYBUS:
630 		return attr->mode;
631 	default:
632 		return 0;
633 	}
634 }
635 
636 static umode_t interface_greybus_is_visible(struct kobject *kobj,
637 						struct attribute *attr, int n)
638 {
639 	struct device *dev = container_of(kobj, struct device, kobj);
640 	struct gb_interface *intf = to_gb_interface(dev);
641 
642 	switch (intf->type) {
643 	case GB_INTERFACE_TYPE_GREYBUS:
644 		return attr->mode;
645 	default:
646 		return 0;
647 	}
648 }
649 
650 static umode_t interface_power_is_visible(struct kobject *kobj,
651 						struct attribute *attr, int n)
652 {
653 	struct device *dev = container_of(kobj, struct device, kobj);
654 	struct gb_interface *intf = to_gb_interface(dev);
655 
656 	switch (intf->type) {
657 	case GB_INTERFACE_TYPE_UNIPRO:
658 	case GB_INTERFACE_TYPE_GREYBUS:
659 		return attr->mode;
660 	default:
661 		return 0;
662 	}
663 }
664 
665 static const struct attribute_group interface_unipro_group = {
666 	.is_visible	= interface_unipro_is_visible,
667 	.attrs		= interface_unipro_attrs,
668 };
669 
670 static const struct attribute_group interface_greybus_group = {
671 	.is_visible	= interface_greybus_is_visible,
672 	.attrs		= interface_greybus_attrs,
673 };
674 
675 static const struct attribute_group interface_power_group = {
676 	.is_visible	= interface_power_is_visible,
677 	.attrs		= interface_power_attrs,
678 };
679 
680 static const struct attribute_group interface_common_group = {
681 	.attrs		= interface_common_attrs,
682 };
683 
684 static const struct attribute_group *interface_groups[] = {
685 	&interface_unipro_group,
686 	&interface_greybus_group,
687 	&interface_power_group,
688 	&interface_common_group,
689 	NULL
690 };
691 
692 static void gb_interface_release(struct device *dev)
693 {
694 	struct gb_interface *intf = to_gb_interface(dev);
695 
696 	trace_gb_interface_release(intf);
697 
698 	kfree(intf);
699 }
700 
701 #ifdef CONFIG_PM
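/*
 * Runtime-suspend the interface: prepare the control connection, remove the
 * interface from TimeSync, suspend the control connection, hibernate the
 * UniPro link and finally gate the reference clock.
 */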
702 static int gb_interface_suspend(struct device *dev)
703 {
704 	struct gb_interface *intf = to_gb_interface(dev);
705 	int ret, timesync_ret;
706 
707 	ret = gb_control_interface_suspend_prepare(intf->control);
708 	if (ret)
709 		return ret;
710 
711 	gb_timesync_interface_remove(intf);
712 
713 	ret = gb_control_suspend(intf->control);
714 	if (ret)
715 		goto err_hibernate_abort;
716 
717 	ret = gb_interface_hibernate_link(intf);
718 	if (ret)
719 		return ret;
720 
721 	/* Delay to allow interface to enter standby before disabling refclk */
722 	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
723 
724 	ret = gb_interface_refclk_set(intf, false);
725 	if (ret)
726 		return ret;
727 
728 	return 0;
729 
730 err_hibernate_abort:
731 	gb_control_interface_hibernate_abort(intf->control);
732 
733 	timesync_ret = gb_timesync_interface_add(intf);
734 	if (timesync_ret) {
735 		dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
736 		return timesync_ret;
737 	}
738 
739 	return ret;
740 }
741 
742 static int gb_interface_resume(struct device *dev)
743 {
744 	struct gb_interface *intf = to_gb_interface(dev);
745 	struct gb_svc *svc = intf->hd->svc;
746 	int ret;
747 
748 	ret = gb_interface_refclk_set(intf, true);
749 	if (ret)
750 		return ret;
751 
752 	ret = gb_svc_intf_resume(svc, intf->interface_id);
753 	if (ret)
754 		return ret;
755 
756 	ret = gb_control_resume(intf->control);
757 	if (ret)
758 		return ret;
759 
760 	ret = gb_timesync_interface_add(intf);
761 	if (ret) {
762 		dev_err(dev, "failed to add to timesync: %d\n", ret);
763 		return ret;
764 	}
765 
766 	ret = gb_timesync_schedule_synchronous(intf);
767 	if (ret) {
768 		dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
769 		return ret;
770 	}
771 
772 	return 0;
773 }
774 
775 static int gb_interface_runtime_idle(struct device *dev)
776 {
777 	pm_runtime_mark_last_busy(dev);
778 	pm_request_autosuspend(dev);
779 
780 	return 0;
781 }
782 #endif
783 
784 static const struct dev_pm_ops gb_interface_pm_ops = {
785 	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
786 			   gb_interface_runtime_idle)
787 };
788 
789 struct device_type greybus_interface_type = {
790 	.name =		"greybus_interface",
791 	.release =	gb_interface_release,
792 	.pm =		&gb_interface_pm_ops,
793 };
794 
795 /*
796  * A Greybus module represents a user-replaceable component on a GMP
797  * phone.  An interface is the physical connection on that module.  A
798  * module may have more than one interface.
799  *
800  * Create a gb_interface structure to represent a discovered interface.
801  * The position of the interface within the Endo is encoded in the
802  * "interface_id" argument.
803  *
804  * Returns a pointer to the new interface or a null pointer if a
805  * failure occurs due to memory exhaustion.
806  */
807 struct gb_interface *gb_interface_create(struct gb_module *module,
808 					 u8 interface_id)
809 {
810 	struct gb_host_device *hd = module->hd;
811 	struct gb_interface *intf;
812 
813 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
814 	if (!intf)
815 		return NULL;
816 
817 	intf->hd = hd;		/* XXX refcount? */
818 	intf->module = module;
819 	intf->interface_id = interface_id;
820 	INIT_LIST_HEAD(&intf->bundles);
821 	INIT_LIST_HEAD(&intf->manifest_descs);
822 	mutex_init(&intf->mutex);
823 	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
824 	init_completion(&intf->mode_switch_completion);
825 
826 	/* Invalid device id to start with */
827 	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
828 
829 	intf->dev.parent = &module->dev;
830 	intf->dev.bus = &greybus_bus_type;
831 	intf->dev.type = &greybus_interface_type;
832 	intf->dev.groups = interface_groups;
833 	intf->dev.dma_mask = module->dev.dma_mask;
834 	device_initialize(&intf->dev);
835 	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
836 			interface_id);
837 
838 	pm_runtime_set_autosuspend_delay(&intf->dev,
839 					 GB_INTERFACE_AUTOSUSPEND_MS);
840 
841 	trace_gb_interface_create(intf);
842 
843 	return intf;
844 }
845 
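/*
 * Helpers for switching the interface system power, reference clock and
 * UniPro port on or off through the SVC.
 */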
846 static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
847 {
848 	struct gb_svc *svc = intf->hd->svc;
849 	int ret;
850 
851 	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
852 
853 	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
854 	if (ret) {
855 		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
856 		return ret;
857 	}
858 
859 	return 0;
860 }
861 
862 static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
863 {
864 	struct gb_svc *svc = intf->hd->svc;
865 	int ret;
866 
867 	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
868 
869 	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
870 	if (ret) {
871 		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
872 		return ret;
873 	}
874 
875 	return 0;
876 }
877 
878 static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
879 {
880 	struct gb_svc *svc = intf->hd->svc;
881 	int ret;
882 
883 	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
884 
885 	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
886 	if (ret) {
887 		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
888 		return ret;
889 	}
890 
891 	return 0;
892 }
893 
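/*
 * Issue the SVC interface-activate operation and translate the reported
 * interface type. Dummy and plain UniPro interfaces are treated as errors
 * for now.
 */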
894 static int gb_interface_activate_operation(struct gb_interface *intf,
895 					   enum gb_interface_type *intf_type)
896 {
897 	struct gb_svc *svc = intf->hd->svc;
898 	u8 type;
899 	int ret;
900 
901 	dev_dbg(&intf->dev, "%s\n", __func__);
902 
903 	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
904 	if (ret) {
905 		dev_err(&intf->dev, "failed to activate: %d\n", ret);
906 		return ret;
907 	}
908 
909 	switch (type) {
910 	case GB_SVC_INTF_TYPE_DUMMY:
911 		*intf_type = GB_INTERFACE_TYPE_DUMMY;
912 		/* FIXME: handle as an error for now */
913 		return -ENODEV;
914 	case GB_SVC_INTF_TYPE_UNIPRO:
915 		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
916 		dev_err(&intf->dev, "interface type UniPro not supported\n");
917 		/* FIXME: handle as an error for now */
918 		return -ENODEV;
919 	case GB_SVC_INTF_TYPE_GREYBUS:
920 		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
921 		break;
922 	default:
923 		dev_err(&intf->dev, "unknown interface type: %u\n", type);
924 		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
925 		return -ENODEV;
926 	}
927 
928 	return 0;
929 }
930 
931 static int gb_interface_hibernate_link(struct gb_interface *intf)
932 {
933 	struct gb_svc *svc = intf->hd->svc;
934 
935 	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
936 }
937 
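/*
 * Power up and activate an interface: enable V_SYS, REFCLK and UniPro,
 * issue the activate operation, read the DME attributes and create the AP
 * route. On failure everything is rolled back in reverse order.
 */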
938 static int _gb_interface_activate(struct gb_interface *intf,
939 				  enum gb_interface_type *type)
940 {
941 	int ret;
942 
943 	*type = GB_INTERFACE_TYPE_UNKNOWN;
944 
945 	if (intf->ejected || intf->removed)
946 		return -ENODEV;
947 
948 	ret = gb_interface_vsys_set(intf, true);
949 	if (ret)
950 		return ret;
951 
952 	ret = gb_interface_refclk_set(intf, true);
953 	if (ret)
954 		goto err_vsys_disable;
955 
956 	ret = gb_interface_unipro_set(intf, true);
957 	if (ret)
958 		goto err_refclk_disable;
959 
960 	ret = gb_interface_activate_operation(intf, type);
961 	if (ret) {
962 		switch (*type) {
963 		case GB_INTERFACE_TYPE_UNIPRO:
964 		case GB_INTERFACE_TYPE_GREYBUS:
965 			goto err_hibernate_link;
966 		default:
967 			goto err_unipro_disable;
968 		}
969 	}
970 
971 	ret = gb_interface_read_dme(intf);
972 	if (ret)
973 		goto err_hibernate_link;
974 
975 	ret = gb_interface_route_create(intf);
976 	if (ret)
977 		goto err_hibernate_link;
978 
979 	intf->active = true;
980 
981 	trace_gb_interface_activate(intf);
982 
983 	return 0;
984 
985 err_hibernate_link:
986 	gb_interface_hibernate_link(intf);
987 err_unipro_disable:
988 	gb_interface_unipro_set(intf, false);
989 err_refclk_disable:
990 	gb_interface_refclk_set(intf, false);
991 err_vsys_disable:
992 	gb_interface_vsys_set(intf, false);
993 
994 	return ret;
995 }
996 
997 /*
998  * At present, we assume a UniPro-only module to be a Greybus module that
999  * failed to send its mailbox poke. There is some reason to believe that this
1000  * is because of a bug in the ES3 bootrom.
1001  *
1002  * FIXME: Check if this is a Toshiba bridge before retrying?
1003  */
1004 static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
1005 					   enum gb_interface_type *type)
1006 {
1007 	int retries = 3;
1008 	int ret;
1009 
1010 	while (retries--) {
1011 		ret = _gb_interface_activate(intf, type);
1012 		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
1013 			continue;
1014 
1015 		break;
1016 	}
1017 
1018 	return ret;
1019 }
1020 
1021 /*
1022  * Activate an interface.
1023  *
1024  * Locking: Caller holds the interface mutex.
1025  */
1026 int gb_interface_activate(struct gb_interface *intf)
1027 {
1028 	enum gb_interface_type type;
1029 	int ret;
1030 
1031 	switch (intf->type) {
1032 	case GB_INTERFACE_TYPE_INVALID:
1033 	case GB_INTERFACE_TYPE_GREYBUS:
1034 		ret = _gb_interface_activate_es3_hack(intf, &type);
1035 		break;
1036 	default:
1037 		ret = _gb_interface_activate(intf, &type);
1038 	}
1039 
1040 	/* Make sure type is detected correctly during reactivation. */
1041 	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
1042 		if (type != intf->type) {
1043 			dev_err(&intf->dev, "failed to detect interface type\n");
1044 
1045 			if (!ret)
1046 				gb_interface_deactivate(intf);
1047 
1048 			return -EIO;
1049 		}
1050 	} else {
1051 		intf->type = type;
1052 	}
1053 
1054 	return ret;
1055 }
1056 
1057 /*
1058  * Deactivate an interface.
1059  *
1060  * Locking: Caller holds the interface mutex.
1061  */
1062 void gb_interface_deactivate(struct gb_interface *intf)
1063 {
1064 	if (!intf->active)
1065 		return;
1066 
1067 	trace_gb_interface_deactivate(intf);
1068 
1069 	/* Abort any ongoing mode switch. */
1070 	if (intf->mode_switch)
1071 		complete(&intf->mode_switch_completion);
1072 
1073 	gb_interface_route_destroy(intf);
1074 	gb_interface_hibernate_link(intf);
1075 	gb_interface_unipro_set(intf, false);
1076 	gb_interface_refclk_set(intf, false);
1077 	gb_interface_vsys_set(intf, false);
1078 
1079 	intf->active = false;
1080 }
1081 
1082 /*
1083  * Enable an interface by enabling its control connection, fetching the
1084  * manifest and other information over it, and finally registering its child
1085  * devices.
1086  *
1087  * Locking: Caller holds the interface mutex.
1088  */
1089 int gb_interface_enable(struct gb_interface *intf)
1090 {
1091 	struct gb_control *control;
1092 	struct gb_bundle *bundle, *tmp;
1093 	int ret, size;
1094 	void *manifest;
1095 
1096 	ret = gb_interface_read_and_clear_init_status(intf);
1097 	if (ret) {
1098 		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
1099 		return ret;
1100 	}
1101 
1102 	/* Establish control connection */
1103 	control = gb_control_create(intf);
1104 	if (IS_ERR(control)) {
1105 		dev_err(&intf->dev, "failed to create control device: %ld\n",
1106 				PTR_ERR(control));
1107 		return PTR_ERR(control);
1108 	}
1109 	intf->control = control;
1110 
1111 	ret = gb_control_enable(intf->control);
1112 	if (ret)
1113 		goto err_put_control;
1114 
1115 	/* Get manifest size using control protocol on CPort */
1116 	size = gb_control_get_manifest_size_operation(intf);
1117 	if (size <= 0) {
1118 		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
1119 
1120 		if (size)
1121 			ret = size;
1122 		else
1123 			ret = -EINVAL;
1124 
1125 		goto err_disable_control;
1126 	}
1127 
1128 	manifest = kmalloc(size, GFP_KERNEL);
1129 	if (!manifest) {
1130 		ret = -ENOMEM;
1131 		goto err_disable_control;
1132 	}
1133 
1134 	/* Get manifest using control protocol on CPort */
1135 	ret = gb_control_get_manifest_operation(intf, manifest, size);
1136 	if (ret) {
1137 		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
1138 		goto err_free_manifest;
1139 	}
1140 
1141 	/*
1142 	 * Parse the manifest and build up our data structures representing
1143 	 * what's in it.
1144 	 */
1145 	if (!gb_manifest_parse(intf, manifest, size)) {
1146 		dev_err(&intf->dev, "failed to parse manifest\n");
1147 		ret = -EINVAL;
1148 		goto err_destroy_bundles;
1149 	}
1150 
1151 	ret = gb_control_get_bundle_versions(intf->control);
1152 	if (ret)
1153 		goto err_destroy_bundles;
1154 
1155 	ret = gb_timesync_interface_add(intf);
1156 	if (ret) {
1157 		dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
1158 		goto err_destroy_bundles;
1159 	}
1160 
1161 	/* Register the control device and any bundles */
1162 	ret = gb_control_add(intf->control);
1163 	if (ret)
1164 		goto err_remove_timesync;
1165 
1166 	pm_runtime_use_autosuspend(&intf->dev);
1167 	pm_runtime_get_noresume(&intf->dev);
1168 	pm_runtime_set_active(&intf->dev);
1169 	pm_runtime_enable(&intf->dev);
1170 
1171 	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
1172 		ret = gb_bundle_add(bundle);
1173 		if (ret) {
1174 			gb_bundle_destroy(bundle);
1175 			continue;
1176 		}
1177 	}
1178 
1179 	kfree(manifest);
1180 
1181 	intf->enabled = true;
1182 
1183 	pm_runtime_put(&intf->dev);
1184 
1185 	trace_gb_interface_enable(intf);
1186 
1187 	return 0;
1188 
1189 err_remove_timesync:
1190 	gb_timesync_interface_remove(intf);
1191 err_destroy_bundles:
1192 	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
1193 		gb_bundle_destroy(bundle);
1194 err_free_manifest:
1195 	kfree(manifest);
1196 err_disable_control:
1197 	gb_control_disable(intf->control);
1198 err_put_control:
1199 	gb_control_put(intf->control);
1200 	intf->control = NULL;
1201 
1202 	return ret;
1203 }
1204 
1205 /*
1206  * Disable an interface and destroy its bundles.
1207  *
1208  * Locking: Caller holds the interface mutex.
1209  */
1210 void gb_interface_disable(struct gb_interface *intf)
1211 {
1212 	struct gb_bundle *bundle;
1213 	struct gb_bundle *next;
1214 
1215 	if (!intf->enabled)
1216 		return;
1217 
1218 	trace_gb_interface_disable(intf);
1219 
1220 	pm_runtime_get_sync(&intf->dev);
1221 
1222 	/* Set disconnected flag to avoid I/O during connection tear down. */
1223 	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
1224 		intf->disconnected = true;
1225 
1226 	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
1227 		gb_bundle_destroy(bundle);
1228 
1229 	if (!intf->mode_switch && !intf->disconnected)
1230 		gb_control_interface_deactivate_prepare(intf->control);
1231 
1232 	gb_control_del(intf->control);
1233 	gb_timesync_interface_remove(intf);
1234 	gb_control_disable(intf->control);
1235 	gb_control_put(intf->control);
1236 	intf->control = NULL;
1237 
1238 	intf->enabled = false;
1239 
1240 	pm_runtime_disable(&intf->dev);
1241 	pm_runtime_set_suspended(&intf->dev);
1242 	pm_runtime_dont_use_autosuspend(&intf->dev);
1243 	pm_runtime_put_noidle(&intf->dev);
1244 }
1245 
1246 /* Enable TimeSync on an Interface control connection. */
1247 int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
1248 				 u64 frame_time, u32 strobe_delay, u32 refclk)
1249 {
1250 	return gb_control_timesync_enable(intf->control, count,
1251 					  frame_time, strobe_delay,
1252 					  refclk);
1253 }
1254 
1255 /* Disable TimeSync on an Interface control connection. */
1256 int gb_interface_timesync_disable(struct gb_interface *intf)
1257 {
1258 	return gb_control_timesync_disable(intf->control);
1259 }
1260 
1261 /* Transmit the Authoritative FrameTime via an Interface control connection. */
1262 int gb_interface_timesync_authoritative(struct gb_interface *intf,
1263 					u64 *frame_time)
1264 {
1265 	return gb_control_timesync_authoritative(intf->control,
1266 						frame_time);
1267 }
1268 
1269 /* Register an interface. */
1270 int gb_interface_add(struct gb_interface *intf)
1271 {
1272 	int ret;
1273 
1274 	ret = device_add(&intf->dev);
1275 	if (ret) {
1276 		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
1277 		return ret;
1278 	}
1279 
1280 	trace_gb_interface_add(intf);
1281 
1282 	dev_info(&intf->dev, "Interface added (%s)\n",
1283 			gb_interface_type_string(intf));
1284 
1285 	switch (intf->type) {
1286 	case GB_INTERFACE_TYPE_GREYBUS:
1287 		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
1288 				intf->vendor_id, intf->product_id);
1289 		/* fall-through */
1290 	case GB_INTERFACE_TYPE_UNIPRO:
1291 		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
1292 				intf->ddbl1_manufacturer_id,
1293 				intf->ddbl1_product_id);
1294 		break;
1295 	default:
1296 		break;
1297 	}
1298 
1299 	return 0;
1300 }
1301 
1302 /* Deregister an interface. */
1303 void gb_interface_del(struct gb_interface *intf)
1304 {
1305 	if (device_is_registered(&intf->dev)) {
1306 		trace_gb_interface_del(intf);
1307 
1308 		device_del(&intf->dev);
1309 		dev_info(&intf->dev, "Interface removed\n");
1310 	}
1311 }
1312 
1313 void gb_interface_put(struct gb_interface *intf)
1314 {
1315 	put_device(&intf->dev);
1316 }
1317