/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include "greybus.h"
#include "greybus_trace.h"

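/*
 * sysfs attributes exposed for each bundle: "bundle_class" and
 * "bundle_id" are read-only, while "state" is a read-write string
 * that a writer may replace at will, each write notifying userspace
 * pollers via sysfs_notify().  Since bundles are named
 * "<interface>.<id>" below, the state file would appear at a path
 * like /sys/bus/greybus/devices/<interface>.<id>/state (illustrative;
 * the exact interface name depends on the module position).
 */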
static ssize_t bundle_class_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	if (bundle->state == NULL)
		return sprintf(buf, "\n");

	return sprintf(buf, "%s\n", bundle->state);
}

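/*
 * Replace the bundle's state string wholesale.  The buffer is copied
 * as-is, so any trailing newline written from userspace becomes part
 * of the stored state.
 */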
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	kfree(bundle->state);
	bundle->state = kstrdup(buf, GFP_KERNEL);
	if (!bundle->state)
		return -ENOMEM;

	/* Tell userspace that the file contents changed */
	sysfs_notify(&bundle->dev.kobj, NULL, "state");

	return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(bundle);

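/*
 * Look up a bundle on @intf by its id.  No locking is needed; bundles
 * are created serially, as noted in gb_bundle_create() below.
 */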
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
							u8 bundle_id)
{
	struct gb_bundle *bundle;

	list_for_each_entry(bundle, &intf->bundles, links) {
		if (bundle->id == bundle_id)
			return bundle;
	}

	return NULL;
}

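/*
 * Device release callback, run when the last reference to the bundle
 * device is dropped; frees the bundle itself along with its state
 * string and CPort descriptor array.
 */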
static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}

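/*
 * Runtime PM: if the bound bundle driver supplies its own runtime
 * callbacks those are used; otherwise all of the bundle's connections
 * are disabled (and re-enabled on resume or failure) around the
 * control-protocol suspend/resume handshake with the interface.
 */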
#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}

static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}

static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}

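/*
 * Runtime-idle: refresh the last-busy timestamp and request an
 * autosuspend rather than suspending immediately.
 */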
static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

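/*
 * SET_RUNTIME_PM_OPS() expands to nothing when CONFIG_PM is disabled,
 * which is why the callbacks above are guarded by #ifdef CONFIG_PM.
 */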
static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
	.pm =		&gb_bundle_pm_ops,
};

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id.  We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}

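/*
 * Register the bundle device with the driver core; after this the
 * bundle is visible in sysfs and eligible for driver binding.  Note
 * that gb_bundle_destroy() copes with both added and not-yet-added
 * bundles, so error paths can call it unconditionally.
 */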
int gb_bundle_add(struct gb_bundle *bundle)
{
	int ret;

	ret = device_add(&bundle->dev);
	if (ret) {
		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
		return ret;
	}

	trace_gb_bundle_add(bundle);

	return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	put_device(&bundle->dev);
}
254