/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 *               to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	struct mutex lock;
};
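
/* Return the first node on @head; callers check list_empty() first. */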
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}
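
/*
 * Look up the real pci_dev backing the virtual (domain, bus, devfn) seen by
 * the frontend.  The virtual bus is always domain 0, bus 0; the slot indexes
 * dev_list[] and the function number is matched against the real device's.
 * Returns NULL if no such device has been exported.
 */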
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		mutex_lock(&vpci_dev->lock);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		mutex_unlock(&vpci_dev->lock);
	}
	return dev;
}
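
/*
 * Two devices belong in the same virtual slot iff they are functions of the
 * same physical device: same real domain, bus, and slot.
 */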
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}
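
/*
 * Export @dev on the virtual bus.  Functions of one physical multi-function
 * device share a virtual slot (keeping their real function numbers); any
 * other device takes the first free slot.  On success the device is
 * published to the frontend via @publish_cb.
 */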
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = PCI_FUNC(dev->devfn);
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	mutex_lock(&vpci_dev->lock);

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
	 * that we want to keep virtual functions at func 0 on their own. They
	 * aren't multi-function devices and hence their presence at func 0
	 * may cause guests to not scan the other functions.
	 */
	if (!dev->is_virtfn || func) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);
			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
				continue;

			if (match_slot(dev, t->dev)) {
				pr_info("vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					func);
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info("vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	mutex_unlock(&vpci_dev->lock);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry);

out:
	return err;
}
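
/*
 * Remove @dev from the virtual bus and hand it back to the pciback stub
 * driver.  @lock selects whether the device lock is taken around
 * pcistub_put_pci_dev().
 */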
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e;

		list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	mutex_unlock(&vpci_dev->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}
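
/*
 * Allocate and initialize the per-device virtual bus state: one list head
 * per virtual slot, guarded by a single mutex.
 */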
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	mutex_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}
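
/* Report the single root (domain 0, bus 0) of the virtual topology. */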
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
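
/*
 * Tear down the virtual bus: return every exported device to the stub
 * driver and free the per-device state.
 */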
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			struct pci_dev *dev = e->dev;

			list_del(&e->list);
			device_lock(&dev->dev);
			pcistub_put_pci_dev(dev);
			device_unlock(&dev->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
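
/*
 * Translate a real device into the (domain, bus, devfn) triple the frontend
 * sees.  Returns 1 and fills the output parameters if @pcidev is exported on
 * this virtual bus, 0 otherwise.
 */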
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	int found = 0, slot;

	mutex_lock(&vpci_dev->lock);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
				pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
					 PCI_FUNC(pcidev->devfn));
			}
		}
	}
	mutex_unlock(&vpci_dev->lock);
	return found;
}

const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};