#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

static DEFINE_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irqsave(&pci_lock, flags);				\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);


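/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller reads the Vendor ID of device 0, function 0 through the
 * word-sized wrapper generated above; "bus" and "vendor" here are
 * hypothetical.
 *
 *	u16 vendor;
 *
 *	if (pci_bus_read_config_word(bus, PCI_DEVFN(0, 0),
 *				     PCI_VENDOR_ID, &vendor) ==
 *	    PCIBIOS_SUCCESSFUL)
 *		pr_info("vendor 0x%04x\n", vendor);
 */
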
/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 *
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing the data to write
 *
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

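/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * might fetch the first bytes of its VPD area like this; "pdev" and
 * "buf" are hypothetical.
 *
 *	u8 buf[16];
 *	ssize_t n = pci_read_vpd(pdev, 0, sizeof(buf), buf);
 *
 *	if (n < 0)
 *		dev_err(&pdev->dev, "VPD read failed: %zd\n", n);
 */
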
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);

static noinline void pci_wait_ucfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_ucfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&pci_lock);
		schedule();
		spin_lock_irq(&pci_lock);
	} while (dev->block_ucfg_access);
	__remove_wait_queue(&pci_ucfg_wait, &wait);
}

#define PCI_USER_READ_CONFIG(size,type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = 0;							\
	u32 data = -1;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev);	\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return ret;							\
}

#define PCI_USER_WRITE_CONFIG(size,type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = -EIO;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev);	\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	spin_unlock_irq(&pci_lock);					\
	return ret;							\
}

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

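/*
 * Usage sketch (illustrative only, not part of this file): the sysfs
 * config-space attributes are the typical consumers of these accessors.
 * A hypothetical caller might do:
 *
 *	u32 bar0;
 *
 *	if (pci_user_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar0))
 *		return -EIO;
 */
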
/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16	flag;
	bool	busy;
	u8	cap;
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret)
			return ret;

		/*
		 * Hardware handshake: a read completes when the device
		 * sets PCI_VPD_ADDR_F, a write when it clears the bit,
		 * so the caller stores the level it expects in vpd->flag.
		 */
		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Select the aligned dword and start the read */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/*
		 * Copy out the dword a byte at a time, least significant
		 * byte first, skipping any bytes before an unaligned
		 * start position.
		 */
		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	/* Writes must be dword-sized and dword-aligned */
	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the dword least significant byte first */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}

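/*
 * Usage sketch (illustrative only, not part of this file): the PCI core
 * calls this while setting up a newly discovered device; a nonzero
 * return simply means the device exposes no usable VPD capability and
 * dev->vpd stays NULL.
 *
 *	if (pci_vpd_pci22_init(dev))
 *		dev_dbg(&dev->dev, "no VPD capability\n");
 */
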
/**
 * pci_vpd_truncate - Set available Vital Product Data size
 * @dev:	pci device struct
 * @size:	available memory in bytes
 *
 * Adjust size of available VPD area.
 */
int pci_vpd_truncate(struct pci_dev *dev, size_t size)
{
	if (!dev->vpd)
		return -EINVAL;

	/* limited by the access method */
	if (size > dev->vpd->len)
		return -EINVAL;

	dev->vpd->len = size;
	if (dev->vpd->attr)
		dev->vpd->attr->size = size;

	return 0;
}
EXPORT_SYMBOL(pci_vpd_truncate);
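
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * that knows its device decodes only 1 KB of valid VPD can clamp the
 * advertised size; "pdev" is hypothetical.
 *
 *	pci_vpd_truncate(pdev, 1024);
 */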

/**
 * pci_block_user_cfg_access - Block userspace PCI config reads/writes
 * @dev:	pci device struct
 *
 * When user access is blocked, any reads or writes to config space will
 * sleep until access is unblocked again.  We don't allow nesting of
 * block/unblock calls.
 */
void pci_block_user_cfg_access(struct pci_dev *dev)
{
	unsigned long flags;
	int was_blocked;

	spin_lock_irqsave(&pci_lock, flags);
	was_blocked = dev->block_ucfg_access;
	dev->block_ucfg_access = 1;
	spin_unlock_irqrestore(&pci_lock, flags);

	/* If we BUG() inside the pci_lock, we're guaranteed to hose
	 * the machine */
	BUG_ON(was_blocked);
}
EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);

/**
 * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows userspace PCI config accesses to resume.
 */
void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_ucfg_access);

	dev->block_ucfg_access = 0;
	wake_up_all(&pci_ucfg_wait);
	spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
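
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * brackets an operation that makes config space unsafe to touch, such
 * as running BIST, with a block/unblock pair.  "pdev" and the fixed
 * 100 ms settle time are hypothetical.
 *
 *	pci_block_user_cfg_access(pdev);
 *	pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
 *	msleep(100);
 *	pci_unblock_user_cfg_access(pdev);
 */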