// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

/* Walk up the PCI bus hierarchy to the root complex bus for @dev. */
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

/* True if @dev is an Intel I/OAT DMA engine of hardware version 3.0 (TBG). */
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		/*
		 * An I/OAT 3.0 provider under a new root complex while other
		 * domains already exist indicates a multi-IOH platform, which
		 * DCA does not support; block all providers.
		 */
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

/*
 * Find the provider that manages @dev; with a NULL @dev, return the first
 * provider of the first domain (used by the legacy dca_get_tag() path).
 */
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
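
/*
 * Example (illustrative sketch, not part of this file): a client driver
 * typically calls dca_add_requester() once its device is up. "my_pdev" and
 * "my_priv" are hypothetical driver state; only the DCA call is real.
 *
 *	int err = dca_add_requester(&my_pdev->dev);
 *
 *	if (!err)
 *		my_priv->dca_enabled = true;	// hypothetical driver flag
 *	else if (err != -ENODEV && err != -EEXIST)
 *		dev_warn(&my_pdev->dev, "failed to enable DCA: %d\n", err);
 */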

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
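
/*
 * Example (illustrative sketch): the same hypothetical client undoes the
 * registration on teardown, mirroring the dca_add_requester() call above.
 *
 *	if (my_priv->dca_enabled) {
 *		dca_remove_requester(&my_pdev->dev);
 *		my_priv->dca_enabled = false;
 *	}
 */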

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
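
/*
 * Example (illustrative sketch): a requester fetches the tag for the CPU it
 * currently runs on and programs it into its hardware; only get_cpu(),
 * put_cpu() and dca3_get_tag() are real, "my_write_dca_ctrl" is a
 * hypothetical helper. Legacy callers without a struct device at hand use
 * dca_get_tag(cpu) instead (see below).
 *
 *	int cpu = get_cpu();
 *	u8 tag = dca3_get_tag(&my_pdev->dev, cpu);
 *
 *	my_write_dca_ctrl(my_priv, queue, tag);
 *	put_cpu();
 */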

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
					int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
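
/*
 * Example (illustrative sketch): a provider driver allocates a descriptor,
 * points it at its own struct dca_ops implementation, and registers it; the
 * teardown path mirrors this with unregister_dca_provider() and
 * free_dca_provider(). "my_dca_ops" and "my_priv_size" are hypothetical.
 *
 *	struct dca_provider *dca;
 *
 *	dca = alloc_dca_provider(&my_dca_ops, my_priv_size);
 *	if (!dca)
 *		return -ENOMEM;
 *
 *	err = register_dca_provider(dca, &my_pdev->dev);
 *	if (err) {
 *		free_dca_provider(dca);
 *		return err;
 *	}
 *
 *	// ... later, on shutdown:
 *	unregister_dca_provider(dca, &my_pdev->dev);
 *	free_dca_provider(dca);
 */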

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
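
/*
 * Example (illustrative sketch): a client usually reacts to providers coming
 * and going via this chain; the event is DCA_PROVIDER_ADD or
 * DCA_PROVIDER_REMOVE and the data pointer is unused. "my_dca_notify" and the
 * driver state it touches are hypothetical.
 *
 *	static int my_dca_notify(struct notifier_block *nb,
 *				 unsigned long event, void *p)
 *	{
 *		switch (event) {
 *		case DCA_PROVIDER_ADD:
 *			dca_add_requester(&my_pdev->dev);
 *			break;
 *		case DCA_PROVIDER_REMOVE:
 *			dca_remove_requester(&my_pdev->dev);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_dca_nb = {
 *		.notifier_call = my_dca_notify,
 *	};
 *
 *	dca_register_notify(&my_dca_nb);	// at probe/init time
 *	dca_unregister_notify(&my_dca_nb);	// at remove/exit time
 */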

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);