/*
 * VFIO PCI I/O Port & MMIO access
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

/*
 * Read or write from an __iomem region (MMIO or I/O port) with an excluded
 * range which is inaccessible.  The excluded range drops writes and fills
 * reads with -1.  This is intended for handling MSI-X vector tables and
 * leftover space for ROM BARs.
 */
static ssize_t do_io_rw(void __iomem *io, char __user *buf,
			loff_t off, size_t count, size_t x_start,
			size_t x_end, bool iswrite)
{
	ssize_t done = 0;

	while (count) {
		size_t fillable, filled;

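		/*
		 * fillable is the number of bytes that can be accessed
		 * directly before reaching the excluded range; zero means
		 * the current offset is inside the excluded range.
		 */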
		if (off < x_start)
			fillable = min(count, (size_t)(x_start - off));
		else if (off >= x_end)
			fillable = count;
		else
			fillable = 0;

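		/*
		 * Split the access into the largest naturally aligned
		 * chunks available: 4, 2, or 1 byte(s) at a time.
		 */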
		if (fillable >= 4 && !(off % 4)) {
			__le32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					return -EFAULT;

				iowrite32(le32_to_cpu(val), io + off);
			} else {
				val = cpu_to_le32(ioread32(io + off));

				if (copy_to_user(buf, &val, 4))
					return -EFAULT;
			}

			filled = 4;
		} else if (fillable >= 2 && !(off % 2)) {
			__le16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					return -EFAULT;

				iowrite16(le16_to_cpu(val), io + off);
			} else {
				val = cpu_to_le16(ioread16(io + off));

				if (copy_to_user(buf, &val, 2))
					return -EFAULT;
			}

			filled = 2;
		} else if (fillable) {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					return -EFAULT;

				iowrite8(val, io + off);
			} else {
				val = ioread8(io + off);

				if (copy_to_user(buf, &val, 1))
					return -EFAULT;
			}

			filled = 1;
		} else {
			/* Fill reads with -1, drop writes */
			filled = min(count, (size_t)(x_end - off));
			if (!iswrite) {
				u8 val = 0xFF;
				size_t i;

				for (i = 0; i < filled; i++)
					if (copy_to_user(buf + i, &val, 1))
						return -EFAULT;
			}
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
}

ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	size_t x_start = 0, x_end = 0;
	resource_size_t end;
	void __iomem *io;
	ssize_t done;

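	/*
	 * Determine how large the BAR is.  A BAR with no assigned resource
	 * is only accessible if it is a shadow ROM, which is limited here
	 * to the 128KB legacy ROM window.
	 */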
	if (pci_resource_start(pdev, bar))
		end = pci_resource_len(pdev, bar);
	else if (bar == PCI_ROM_RESOURCE &&
		 pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
		end = 0x20000;
	else
		return -EINVAL;

	if (pos >= end)
		return -EINVAL;

	count = min(count, (size_t)(end - pos));

	if (bar == PCI_ROM_RESOURCE) {
		/*
		 * The ROM can fill less space than the BAR, so we start the
		 * excluded range at the end of the actual ROM.  This makes
		 * filling large ROM BARs much faster.
		 */
		io = pci_map_rom(pdev, &x_start);
		if (!io)
			return -ENOMEM;
		x_end = end;
	} else if (!vdev->barmap[bar]) {
		int ret;

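		/*
		 * Lazily request and map the BAR on first access; the
		 * mapping is cached in barmap so later accesses reuse it.
		 */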
		ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
		if (ret)
			return ret;

		io = pci_iomap(pdev, bar, 0);
		if (!io) {
			pci_release_selected_regions(pdev, 1 << bar);
			return -ENOMEM;
		}

		vdev->barmap[bar] = io;
	} else
		io = vdev->barmap[bar];

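	/*
	 * The MSI-X vector table must not be accessed directly through the
	 * BAR, so mark it as the excluded range for do_io_rw().
	 */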
	if (bar == vdev->msix_bar) {
		x_start = vdev->msix_offset;
		x_end = vdev->msix_offset + vdev->msix_size;
	}

	done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);

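	/* Advance the file offset only if the access did not fail */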
	if (done >= 0)
		*ppos += done;

	if (bar == PCI_ROM_RESOURCE)
		pci_unmap_rom(pdev, io);

	return done;
}

ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
	void __iomem *iomem = NULL;
	unsigned int rsrc;
	bool is_ioport;
	ssize_t done;

	if (!vdev->has_vga)
		return -EINVAL;

	if (pos > 0xbfffful)
		return -EINVAL;

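	/*
	 * Map the legacy VGA range being accessed: the 0xa0000-0xbffff
	 * memory window or one of the 0x3b0-0x3bb and 0x3c0-0x3df I/O
	 * port ranges.  Each access is clamped to a single range.
	 */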
	switch ((u32)pos) {
	case 0xa0000 ... 0xbffff:
		count = min(count, (size_t)(0xc0000 - pos));
		iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
		off = pos - 0xa0000;
		rsrc = VGA_RSRC_LEGACY_MEM;
		is_ioport = false;
		break;
	case 0x3b0 ... 0x3bb:
		count = min(count, (size_t)(0x3bc - pos));
		iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
		off = pos - 0x3b0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	case 0x3c0 ... 0x3df:
		count = min(count, (size_t)(0x3e0 - pos));
		iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
		off = pos - 0x3c0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	default:
		return -EINVAL;
	}

	if (!iomem)
		return -ENOMEM;

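	/*
	 * Hold the VGA arbiter lock on the legacy resource while accessing
	 * it; vga_get_interruptible() allows a pending signal to abort the
	 * wait.
	 */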
	ret = vga_get_interruptible(vdev->pdev, rsrc);
	if (ret) {
		is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
		return ret;
	}

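	/* No excluded range for VGA accesses: x_start == x_end == 0 */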
	done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);

	vga_put(vdev->pdev, rsrc);

	is_ioport ? ioport_unmap(iomem) : iounmap(iomem);

	if (done >= 0)
		*ppos += done;

	return done;
}