/*
 * (C) Copyright IBM Corporation 2006
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * IBM AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file common_interface.c
 * Platform independent interface glue.
 *
 * \author Ian Romanick <idr@us.ibm.com>
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "pciaccess.h"
#include "pciaccess_private.h"

#if defined(__linux__) || defined(__GLIBC__) || defined(__CYGWIN__)
#include <byteswap.h>

#if __BYTE_ORDER == __BIG_ENDIAN
# define LETOH_16(x) bswap_16(x)
# define HTOLE_16(x) bswap_16(x)
# define LETOH_32(x) bswap_32(x)
# define HTOLE_32(x) bswap_32(x)
#else
# define LETOH_16(x) (x)
# define HTOLE_16(x) (x)
# define LETOH_32(x) (x)
# define HTOLE_32(x) (x)
#endif /* linux */

#elif defined(__sun)

#include <sys/byteorder.h>

#ifdef _BIG_ENDIAN
# define LETOH_16(x) BSWAP_16(x)
# define HTOLE_16(x) BSWAP_16(x)
# define LETOH_32(x) BSWAP_32(x)
# define HTOLE_32(x) BSWAP_32(x)
#else
# define LETOH_16(x) (x)
# define HTOLE_16(x) (x)
# define LETOH_32(x) (x)
# define HTOLE_32(x) (x)
#endif /* Solaris */

#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>

#define htobe16(x) OSSwapHostToBigInt16(x)
#define htole16(x) OSSwapHostToLittleInt16(x)
#define be16toh(x) OSSwapBigToHostInt16(x)
#define le16toh(x) OSSwapLittleToHostInt16(x)

#define htobe32(x) OSSwapHostToBigInt32(x)
#define htole32(x) OSSwapHostToLittleInt32(x)
#define be32toh(x) OSSwapBigToHostInt32(x)
#define le32toh(x) OSSwapLittleToHostInt32(x)

#define htobe64(x) OSSwapHostToBigInt64(x)
#define htole64(x) OSSwapHostToLittleInt64(x)
#define be64toh(x) OSSwapBigToHostInt64(x)
#define le64toh(x) OSSwapLittleToHostInt64(x)

/* Map the byte-order helpers used below onto the macros defined above so
 * this branch provides LETOH_*/HTOLE_* like the other platform branches.
 */
#define HTOLE_16(x) htole16(x)
#define HTOLE_32(x) htole32(x)
#define LETOH_16(x) le16toh(x)
#define LETOH_32(x) le32toh(x)

#else

#include <sys/endian.h>

#define HTOLE_16(x) htole16(x)
#define HTOLE_32(x) htole32(x)

#if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
#define LETOH_16(x) le16toh(x)
#define LETOH_32(x) le32toh(x)
#else
#define LETOH_16(x) letoh16(x)
#define LETOH_32(x) letoh32(x)
#endif

#endif /* others */

/**
 * Read a device's expansion ROM.
 *
 * Reads the device's expansion ROM and stores the data in the memory pointed
 * to by \c buffer.  The buffer must be at least \c pci_device::rom_size
 * bytes.
 *
 * \param dev    Device whose expansion ROM is to be read.
 * \param buffer Memory in which to store the ROM.
 *
 * \return
 * Zero on success or an \c errno value on failure.
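 *
 * A minimal usage sketch (assumes the device has already been probed so
 * that \c pci_device::rom_size is valid; error handling abbreviated):
 * \code
 *     void *rom = malloc(dev->rom_size);
 *     if ((rom != NULL) && (pci_device_read_rom(dev, rom) == 0)) {
 *         // ... inspect the ROM image ...
 *     }
 *     free(rom);
 * \endcode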
 */
int
pci_device_read_rom( struct pci_device * dev, void * buffer )
{
    if ( (dev == NULL) || (buffer == NULL) ) {
        return EFAULT;
    }


    return (pci_sys->methods->read_rom)( dev, buffer );
}

/**
 * Probe a PCI (VGA) device to determine whether it is the boot VGA device.
 *
 * \param dev Device whose VGA status is to be queried.
 * \return
 * Zero if the device is not the boot VGA device, 1 if it is.
 */
int
pci_device_is_boot_vga( struct pci_device * dev )
{
    if (!pci_sys->methods->boot_vga)
        return 0;
    return pci_sys->methods->boot_vga( dev );
}

/**
 * Probe a PCI device to determine whether a kernel driver is attached.
 *
 * \param dev Device to query.
 * \return
 * Zero if no kernel driver is attached, 1 if a kernel driver is attached.
 */
int
pci_device_has_kernel_driver( struct pci_device * dev )
{
    if (!pci_sys->methods->has_kernel_driver)
        return 0;
    return pci_sys->methods->has_kernel_driver( dev );
}

/**
 * Probe a PCI device to learn information about the device.
 *
 * Probes a PCI device to learn various information about the device.  Before
 * calling this function, the only public fields in the \c pci_device
 * structure that have valid values are \c pci_device::domain,
 * \c pci_device::bus, \c pci_device::dev, and \c pci_device::func.
 *
 * \param dev Device to be probed.
 *
 * \return
 * Zero on success or an \c errno value on failure.
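 *
 * A minimal usage sketch (assumes \c dev was obtained from a device
 * iterator, which is not shown here):
 * \code
 *     if (pci_device_probe(dev) == 0) {
 *         // dev->regions, dev->vendor_id, etc. are now valid
 *     }
 * \endcode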
 */
int
pci_device_probe( struct pci_device * dev )
{
    if ( dev == NULL ) {
        return EFAULT;
    }


    return (pci_sys->methods->probe)( dev );
}


/**
 * Map the specified BAR so that it can be accessed by the CPU.
 *
 * Maps the specified BAR for access by the processor.  The pointer to the
 * mapped region is stored in the \c pci_mem_region::memory pointer for the
 * BAR.
 *
 * \param dev          Device whose memory region is to be mapped.
 * \param region       Region, in the range [0, 5], that is to be mapped.
 * \param write_enable Map for writing (non-zero).
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_map_region(struct pci_device * dev, unsigned region,
                      int write_enable)
{
    const unsigned map_flags =
        (write_enable) ? PCI_DEV_MAP_FLAG_WRITABLE : 0;

    if ((region > 5) || (dev->regions[region].size == 0)) {
        return ENOENT;
    }

    if (dev->regions[region].memory != NULL) {
        return 0;
    }

    return pci_device_map_range(dev, dev->regions[region].base_addr,
                                dev->regions[region].size, map_flags,
                                &dev->regions[region].memory);
}


/**
 * Map the specified memory range so that it can be accessed by the CPU.
 *
 * Maps the specified memory range for access by the processor.  The pointer
 * to the mapped region is stored in \c addr.  In addition, the
 * \c pci_mem_region::memory pointer for the BAR will be updated.
 *
 * \param dev          Device whose memory region is to be mapped.
 * \param base         Base address of the range to be mapped.
 * \param size         Size of the range to be mapped.
 * \param write_enable Map for writing (non-zero).
 * \param addr         Location to store the mapped address.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range
 */
int pci_device_map_memory_range(struct pci_device *dev,
                                pciaddr_t base, pciaddr_t size,
                                int write_enable, void **addr)
{
    return pci_device_map_range(dev, base, size,
                                (write_enable) ? PCI_DEV_MAP_FLAG_WRITABLE : 0,
                                addr);
}


/**
 * Map the specified memory range so that it can be accessed by the CPU.
 *
 * Maps the specified memory range for access by the processor.  The pointer
 * to the mapped region is stored in \c addr.  In addition, the
 * \c pci_mem_region::memory pointer for the BAR will be updated.
 *
 * \param dev       Device whose memory region is to be mapped.
 * \param base      Base address of the range to be mapped.
 * \param size      Size of the range to be mapped.
 * \param map_flags Flag bits controlling how the mapping is accessed.
 * \param addr      Location to store the mapped address.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_unmap_range
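 *
 * A minimal usage sketch mapping the first BAR for read/write access
 * (assumes the device has already been probed so that
 * \c pci_device::regions is valid):
 * \code
 *     void *bar0;
 *     int err = pci_device_map_range(dev, dev->regions[0].base_addr,
 *                                    dev->regions[0].size,
 *                                    PCI_DEV_MAP_FLAG_WRITABLE, &bar0);
 *     if (err == 0) {
 *         // ... access the BAR through bar0 ...
 *         pci_device_unmap_range(dev, bar0, dev->regions[0].size);
 *     }
 * \endcode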
 */
int
pci_device_map_range(struct pci_device *dev, pciaddr_t base,
                     pciaddr_t size, unsigned map_flags,
                     void **addr)
{
    struct pci_device_private *const devp =
        (struct pci_device_private *) dev;
    struct pci_device_mapping *mappings;
    unsigned region;
    unsigned i;
    int err = 0;


    *addr = NULL;

    if (dev == NULL) {
        return EFAULT;
    }


    for (region = 0; region < 6; region++) {
        const struct pci_mem_region * const r = &dev->regions[region];

        if (r->size != 0) {
            if ((r->base_addr <= base) && ((r->base_addr + r->size) > base)) {
                if ((base + size) > (r->base_addr + r->size)) {
                    return E2BIG;
                }

                break;
            }
        }
    }

    if (region > 5) {
        return ENOENT;
    }

    /* Make sure that there isn't already a mapping with the same base and
     * size.
     */
    for (i = 0; i < devp->num_mappings; i++) {
        if ((devp->mappings[i].base == base)
            && (devp->mappings[i].size == size)) {
            return EINVAL;
        }
    }


    mappings = realloc(devp->mappings,
                       (sizeof(devp->mappings[0]) * (devp->num_mappings + 1)));
    if (mappings == NULL) {
        return ENOMEM;
    }

    mappings[devp->num_mappings].base = base;
    mappings[devp->num_mappings].size = size;
    mappings[devp->num_mappings].region = region;
    mappings[devp->num_mappings].flags = map_flags;
    mappings[devp->num_mappings].memory = NULL;

    if (dev->regions[region].memory == NULL) {
        err = (*pci_sys->methods->map_range)(dev,
                                             &mappings[devp->num_mappings]);
    }

    if (err == 0) {
        *addr = mappings[devp->num_mappings].memory;
        devp->num_mappings++;
    } else {
        mappings = realloc(mappings,
                           (sizeof(mappings[0]) * devp->num_mappings));
    }

    devp->mappings = mappings;

    return err;
}


/**
 * Unmap the specified BAR so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified BAR that was previously mapped via
 * \c pci_device_map_region.
 *
 * \param dev    Device whose memory region is to be unmapped.
 * \param region Region, in the range [0, 5], that is to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_unmap_region( struct pci_device * dev, unsigned region )
{
    int err;

    if (dev == NULL) {
        return EFAULT;
    }

    if ((region > 5) || (dev->regions[region].size == 0)) {
        return ENOENT;
    }

    err = pci_device_unmap_range(dev, dev->regions[region].memory,
                                 dev->regions[region].size);
    if (!err) {
        dev->regions[region].memory = NULL;
    }

    return err;
}


/**
 * Unmap the specified memory range so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified memory range that was previously mapped via
 * \c pci_device_map_memory_range.
 *
 * \param dev    Device whose memory is to be unmapped.
 * \param memory Pointer to the base of the mapped range.
 * \param size   Size, in bytes, of the range to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_unmap_range
 * \deprecated
 */
int
pci_device_unmap_memory_range(struct pci_device *dev, void *memory,
                              pciaddr_t size)
{
    return pci_device_unmap_range(dev, memory, size);
}


/**
 * Unmap the specified memory range so that it can no longer be accessed by the CPU.
 *
 * Unmaps the specified memory range that was previously mapped via
 * \c pci_device_map_range or \c pci_device_map_memory_range.
 *
 * \param dev    Device whose memory is to be unmapped.
 * \param memory Pointer to the base of the mapped range.
 * \param size   Size, in bytes, of the range to be unmapped.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range
 */
int
pci_device_unmap_range(struct pci_device *dev, void *memory,
                       pciaddr_t size)
{
    struct pci_device_private *const devp =
        (struct pci_device_private *) dev;
    unsigned i;
    int err;


    if (dev == NULL) {
        return EFAULT;
    }

    for (i = 0; i < devp->num_mappings; i++) {
        if ((devp->mappings[i].memory == memory)
            && (devp->mappings[i].size == size)) {
            break;
        }
    }

    if (i == devp->num_mappings) {
        return ENOENT;
    }


    err = (*pci_sys->methods->unmap_range)(dev, &devp->mappings[i]);
    if (!err) {
        const unsigned entries_to_move = (devp->num_mappings - i) - 1;

        if (entries_to_move > 0) {
            (void) memmove(&devp->mappings[i],
                           &devp->mappings[i + 1],
                           entries_to_move * sizeof(devp->mappings[0]));
        }

        devp->num_mappings--;
        devp->mappings = realloc(devp->mappings,
                                 (sizeof(devp->mappings[0]) * devp->num_mappings));
    }

    return err;
}


/**
 * Read arbitrary bytes from device's PCI config space
 *
 * Reads data from the device's PCI configuration space.  As with the system
 * read command, less data may be returned, without an error, than was
 * requested.  This is particularly the case if a non-root user tries to read
 * beyond the first 64 bytes of configuration space.
 *
 * \param dev        Device whose PCI configuration data is to be read.
 * \param data       Location to store the data.
 * \param offset     Initial byte offset to read.
 * \param size       Total number of bytes to read.
 * \param bytes_read Location to store the actual number of bytes read.  This
 *                   pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data read from PCI configuration space using this routine is \b not
 * byte-swapped to the host's byte order.  PCI configuration data is always
 * stored in little-endian order, and that is what this routine returns.
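 *
 * A minimal usage sketch reading the 32-bit vendor/device ID word at
 * offset 0 (the result is raw little-endian data; the
 * \c pci_device_cfg_read_u32 wrapper performs the byte-order conversion):
 * \code
 *     uint32_t id;
 *     pciaddr_t got;
 *     if ((pci_device_cfg_read(dev, &id, 0, 4, &got) == 0) && (got == 4)) {
 *         // id holds the dword exactly as stored in config space
 *     }
 * \endcode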
 */
int
pci_device_cfg_read( struct pci_device * dev, void * data,
                     pciaddr_t offset, pciaddr_t size,
                     pciaddr_t * bytes_read )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->read( dev, data, offset, size,
                                   (bytes_read == NULL)
                                   ? & scratch : bytes_read );
}


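/* Convenience wrappers around pci_device_cfg_read() that read a single
 * 8-, 16-, or 32-bit value.  The 16- and 32-bit variants convert the result
 * from PCI little-endian order to host byte order.  ENXIO is returned if
 * fewer bytes than requested could be read.
 */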
int
pci_device_cfg_read_u8( struct pci_device * dev, uint8_t * data,
                        pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 1, & bytes );

    if ( (err == 0) && (bytes != 1) ) {
        err = ENXIO;
    }

    return err;
}


int
pci_device_cfg_read_u16( struct pci_device * dev, uint16_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENXIO;
    }

    *data = LETOH_16( *data );
    return err;
}


int
pci_device_cfg_read_u32( struct pci_device * dev, uint32_t * data,
                         pciaddr_t offset )
{
    pciaddr_t bytes;
    int err = pci_device_cfg_read( dev, data, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENXIO;
    }

    *data = LETOH_32( *data );
    return err;
}


/**
 * Write arbitrary bytes to device's PCI config space
 *
 * Writes data to the device's PCI configuration space.  As with the system
 * write command, less data may be written, without an error, than was
 * requested.
 *
 * \param dev           Device whose PCI configuration data is to be written.
 * \param data          Location of the source data.
 * \param offset        Initial byte offset to write.
 * \param size          Total number of bytes to write.
 * \param bytes_written Location to store the actual number of bytes written.
 *                      This pointer may be \c NULL.
 *
 * \returns
 * Zero on success or an errno value on failure.
 *
 * \note
 * Data written to PCI configuration space using this routine is \b not
 * byte-swapped from the host's byte order.  PCI configuration data is always
 * stored in little-endian order, so data written with this routine should be
 * put in that order in advance.
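 *
 * A minimal usage sketch writing two bytes that are already in
 * little-endian order (the \c pci_device_cfg_write_u16 wrapper below
 * performs the host-to-little-endian conversion automatically):
 * \code
 *     uint8_t le_value[2] = { 0x07, 0x00 };   // 0x0007, little-endian
 *     pciaddr_t put;
 *     int err = pci_device_cfg_write(dev, le_value, 4, 2, &put);
 * \endcode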
 */
int
pci_device_cfg_write( struct pci_device * dev, const void * data,
                      pciaddr_t offset, pciaddr_t size,
                      pciaddr_t * bytes_written )
{
    pciaddr_t scratch;

    if ( (dev == NULL) || (data == NULL) ) {
        return EFAULT;
    }

    return pci_sys->methods->write( dev, data, offset, size,
                                    (bytes_written == NULL)
                                    ? & scratch : bytes_written );
}


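/* Convenience wrappers around pci_device_cfg_write() that write a single
 * 8-, 16-, or 32-bit value.  The 16- and 32-bit variants convert the value
 * from host byte order to PCI little-endian order before writing.  ENOSPC is
 * returned if fewer bytes than requested could be written.
 */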
int
pci_device_cfg_write_u8(struct pci_device *dev, uint8_t data,
                        pciaddr_t offset)
{
    pciaddr_t bytes;
    int err = pci_device_cfg_write(dev, & data, offset, 1, & bytes);

    if ( (err == 0) && (bytes != 1) ) {
        err = ENOSPC;
    }


    return err;
}


int
pci_device_cfg_write_u16(struct pci_device *dev, uint16_t data,
                         pciaddr_t offset)
{
    pciaddr_t bytes;
    const uint16_t temp = HTOLE_16(data);
    int err = pci_device_cfg_write( dev, & temp, offset, 2, & bytes );

    if ( (err == 0) && (bytes != 2) ) {
        err = ENOSPC;
    }


    return err;
}


int
pci_device_cfg_write_u32(struct pci_device *dev, uint32_t data,
                         pciaddr_t offset)
{
    pciaddr_t bytes;
    const uint32_t temp = HTOLE_32(data);
    int err = pci_device_cfg_write( dev, & temp, offset, 4, & bytes );

    if ( (err == 0) && (bytes != 4) ) {
        err = ENOSPC;
    }


    return err;
}


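/**
 * Read-modify-write a 32-bit word of the device's PCI configuration space.
 *
 * Reads the 32-bit value at \c offset, clears the bits set in \c mask, ORs
 * in \c data, and writes the result back.  Byte-order conversion is handled
 * by the underlying \c pci_device_cfg_read_u32 and
 * \c pci_device_cfg_write_u32 calls.
 *
 * \param dev    Device whose PCI configuration data is to be modified.
 * \param mask   Bits to clear before ORing in \c data.
 * \param data   Bit values to set.
 * \param offset Byte offset of the 32-bit word to modify.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 */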
int
pci_device_cfg_write_bits( struct pci_device * dev, uint32_t mask,
                           uint32_t data, pciaddr_t offset )
{
    uint32_t temp;
    int err;

    err = pci_device_cfg_read_u32( dev, & temp, offset );
    if ( ! err ) {
        temp &= ~mask;
        temp |= data;

        err = pci_device_cfg_write_u32(dev, temp, offset);
    }

    return err;
}

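/**
 * Enable the specified device.
 *
 * The exact operation performed is backend specific; if the platform backend
 * does not provide an enable method, this function does nothing.
 *
 * \param dev Device to be enabled.
 */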
void
pci_device_enable(struct pci_device *dev)
{
    if (dev == NULL) {
        return;
    }

    if (pci_sys->methods->enable)
        pci_sys->methods->enable(dev);
}

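/**
 * Disable the specified device.
 *
 * The exact operation performed is backend specific; if the platform backend
 * does not provide a disable method, this function does nothing.
 *
 * \param dev Device to be disabled.
 */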
void
pci_device_disable(struct pci_device *dev)
{
    if (dev == NULL)
        return;

    if (pci_sys->methods->disable)
        pci_sys->methods->disable(dev);
}

/**
 * Map the legacy memory space for the PCI domain containing \c dev.
 *
 * \param dev       Device whose memory region is to be mapped.
 * \param base      Base address of the range to be mapped.
 * \param size      Size of the range to be mapped.
 * \param map_flags Flag bits controlling how the mapping is accessed.
 * \param addr      Location to store the mapped address.
 *
 * \returns
 * Zero on success or an \c errno value on failure.
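 *
 * A minimal usage sketch mapping the VGA text buffer of the domain that
 * contains \c dev (the requested range must lie below 1 MiB):
 * \code
 *     void *vga;
 *     int err = pci_device_map_legacy(dev, 0xb8000, 0x8000,
 *                                     PCI_DEV_MAP_FLAG_WRITABLE, &vga);
 *     if (err == 0) {
 *         // ... access legacy memory through vga ...
 *         pci_device_unmap_legacy(dev, vga, 0x8000);
 *     }
 * \endcode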
 */
int
pci_device_map_legacy(struct pci_device *dev, pciaddr_t base, pciaddr_t size,
                      unsigned map_flags, void **addr)
{
    if (base > 0x100000 || base + size > 0x100000)
        return EINVAL;

    if (!pci_sys->methods->map_legacy)
        return ENOSYS;

    return pci_sys->methods->map_legacy(dev, base, size, map_flags, addr);
}

/**
 * Unmap the legacy memory space for the PCI domain containing \c dev.
 *
 * \param dev  Device whose memory region is to be unmapped.
 * \param addr Location of the mapped address.
 * \param size Size of the range to be unmapped.
 *
 * \returns
 * Zero on success or an \c errno value on failure.
 */
int
pci_device_unmap_legacy(struct pci_device *dev, void *addr, pciaddr_t size)
{
    if (!pci_sys->methods->unmap_legacy)
        return ENOSYS;

    return pci_sys->methods->unmap_legacy(dev, addr, size);
}