// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA translation between STA2x11 AMBA memory mapping and the x86 memory mapping
 *
 * ST Microelectronics ConneXt (STA2X11/STA2X10)
 *
 * Copyright (c) 2010-2011 Wind River Systems, Inc.
 */

#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/dma-direct.h>
#include <asm/iommu.h>

#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
extern int swiotlb_late_init_with_default_size(size_t default_size);

/*
 * We build a list of bus numbers that are under the ConneXt. The
 * main bridge hosts 4 busses, which are the 4 endpoints, in order.
 */
#define STA2X11_NR_EP		4	/* 0..3 included */
#define STA2X11_NR_FUNCS	8	/* 0..7 included */
#define STA2X11_AMBA_SIZE	(512 << 20)
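/*
 * Each endpoint sees system memory through a single 512 MB AMBA window,
 * so the DMA masks set in sta2x11_setup_pdev() below are derived from
 * STA2X11_AMBA_SIZE: only the first 512 MB of RAM are directly
 * reachable, and higher buffers go through the swiotlb bounce area
 * registered at probe time.
 */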

struct sta2x11_ahb_regs { /* saved during suspend */
	u32 base, pexlbase, pexhbase, crw;
};

struct sta2x11_mapping {
	u32 amba_base;
	int is_suspended;
	struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};

struct sta2x11_instance {
	struct list_head list;
	int bus0;
	struct sta2x11_mapping map[STA2X11_NR_EP];
};

static LIST_HEAD(sta2x11_instance_list);

/* At probe time, record new instances of this bridge (likely one only) */
static void sta2x11_new_instance(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;

	instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
	if (!instance)
		return;
	/* This has a subordinate bridge, with 4 more-subordinate ones */
	instance->bus0 = pdev->subordinate->number + 1;

	if (list_empty(&sta2x11_instance_list)) {
		int size = STA2X11_SWIOTLB_SIZE;
		/* First instance: register your own swiotlb area */
		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
		if (swiotlb_late_init_with_default_size(size))
			dev_emerg(&pdev->dev, "init swiotlb failed\n");
	}
	list_add(&instance->list, &sta2x11_instance_list);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);
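/*
 * DECLARE_PCI_FIXUP_ENABLE hooks run from pci_enable_device(), so an
 * instance is recorded (and, the first time, the swiotlb area reserved)
 * as soon as the ConneXt main bridge (device ID 0xcc17) is enabled.
 */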

/*
 * Utility functions used in this file from below
 */
static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;
	int ep;

	list_for_each_entry(instance, &sta2x11_instance_list, list) {
		ep = pdev->bus->number - instance->bus0;
		if (ep >= 0 && ep < STA2X11_NR_EP)
			return instance;
	}
	return NULL;
}

static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;

	instance = sta2x11_pdev_to_instance(pdev);
	if (!instance)
		return -1;

	return pdev->bus->number - instance->bus0;
}

static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;
	int ep;

	instance = sta2x11_pdev_to_instance(pdev);
	if (!instance)
		return NULL;
	ep = sta2x11_pdev_to_ep(pdev);
	return instance->map + ep;
}
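/*
 * Illustrative example: if the main bridge's subordinate bus is 1, then
 * bus0 is 2 and a device sitting on bus 4 is endpoint 4 - 2 = 2, so it
 * uses instance->map[2].
 */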

/* This is exported, as some devices need to access the MFD registers */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
	return sta2x11_pdev_to_instance(pdev);
}
EXPORT_SYMBOL(sta2x11_get_instance);
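/*
 * Hypothetical usage sketch for a ConneXt sub-driver (names are
 * illustrative only):
 *
 *	struct sta2x11_instance *inst = sta2x11_get_instance(pdev);
 *	if (inst)
 *		...	// reach the shared per-ConneXt state
 */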


/**
 * p2a - Translate physical address to STA2x11 AMBA address,
 *	 used for DMA transfers to STA2x11
 * @p: Physical address
 * @pdev: PCI device (must be hosted within the connext)
 */
static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
{
	struct sta2x11_mapping *map;
	dma_addr_t a;

	map = sta2x11_pdev_to_mapping(pdev);
	a = p + map->amba_base;
	return a;
}

/**
 * a2p - Translate STA2x11 AMBA address to physical address,
 *	 used for DMA transfers from STA2x11
 * @a: STA2x11 AMBA address
 * @pdev: PCI device (must be hosted within the connext)
 */
static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
{
	struct sta2x11_mapping *map;
	dma_addr_t p;

	map = sta2x11_pdev_to_mapping(pdev);
	p = a - map->amba_base;
	return p;
}
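/*
 * Illustrative example (the AMBA base value is hypothetical): with an
 * amba_base of 0x40000000 read back in sta2x11_map_ep(), p2a() turns the
 * CPU physical address 0x00001000 into the AMBA/DMA address 0x40001000,
 * and a2p() performs the inverse translation.
 */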

/* At setup time, we use our own ops if the device is a ConneXt one */
static void sta2x11_setup_pdev(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);

	if (!instance) /* either a sta2x11 bridge or another ST device */
		return;
	pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
	pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
	pdev->dev.archdata.is_sta2x11 = true;

	/* We must enable all devices as master, for audio DMA to work */
	pci_set_master(pdev);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
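/*
 * Both DMA masks are STA2X11_AMBA_SIZE - 1 (0x1fffffff), matching the
 * 512 MB AMBA window: buffers above that limit cannot be addressed
 * directly and are bounced through the swiotlb area registered in
 * sta2x11_new_instance().
 */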

/*
 * The following three functions are exported (used in swiotlb: FIXME)
 */
/**
 * dma_capable - Check if device can manage DMA transfers (FIXME: kill it)
 * @dev: device for a PCI device
 * @addr: DMA address
 * @size: DMA size
 */
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	struct sta2x11_mapping *map;

	if (!dev->archdata.is_sta2x11) {
		if (!dev->dma_mask)
			return false;
		return addr + size - 1 <= *dev->dma_mask;
	}

	map = sta2x11_pdev_to_mapping(to_pci_dev(dev));

	if (!map || (addr < map->amba_base))
		return false;
	if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
		return false;
	}

	return true;
}

/**
 * __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
 * @dev: device for a PCI device
 * @paddr: Physical address
 */
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (!dev->archdata.is_sta2x11)
		return paddr;
	return p2a(paddr, to_pci_dev(dev));
}
/**
 * __dma_to_phys - Return the physical address used for this STA2x11 DMA address
 * @dev: device for a PCI device
 * @daddr: STA2x11 AMBA DMA address
 */
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (!dev->archdata.is_sta2x11)
		return daddr;
	return a2p(daddr, to_pci_dev(dev));
}
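/*
 * With CONFIG_STA2X11 these out-of-line definitions stand in for the
 * generic dma-direct address translation helpers, so dma-direct and
 * swiotlb transparently apply the AMBA offset for devices marked
 * is_sta2x11 and leave every other device untouched.
 */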


/*
 * At boot we must set up the mappings for the pcie-to-amba bridge.
 * It involves device access, and the same happens at suspend/resume time
 */

#define AHB_MAPB		0xCA4
#define AHB_CRW(i)		(AHB_MAPB + 0 + (i) * 0x10)
#define AHB_CRW_SZMASK		0xfffffc00UL
#define AHB_CRW_ENABLE		(1 << 0)
#define AHB_CRW_WTYPE_MEM	(2 << 1)
#define AHB_CRW_ROE		(1UL << 3)	/* Relax Order Ena */
#define AHB_CRW_NSE		(1UL << 4)	/* No Snoop Enable */
#define AHB_BASE(i)		(AHB_MAPB + 4 + (i) * 0x10)
#define AHB_PEXLBASE(i)		(AHB_MAPB + 8 + (i) * 0x10)
#define AHB_PEXHBASE(i)		(AHB_MAPB + 12 + (i) * 0x10)
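/*
 * Each of the STA2X11_NR_FUNCS AHB windows is described by four 32-bit
 * registers in PCI config space, 0x10 apart, starting at offset 0xCA4:
 * CRW (size and control bits), BASE (AMBA base) and PEXLBASE/PEXHBASE
 * (low/high PCIe base).  For example, AHB_CRW(1) is config offset 0xCB4.
 */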

/* At probe time, enable mapping for each endpoint, using the pdev */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;
	pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);

	/* Configure AHB mapping */
	pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
	pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
	pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
			       AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);

	/* Disable all the other windows */
	for (i = 1; i < STA2X11_NR_FUNCS; i++)
		pci_write_config_dword(pdev, AHB_CRW(i), 0);

	dev_info(&pdev->dev,
		 "sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
		 sta2x11_pdev_to_ep(pdev), map->amba_base,
		 map->amba_base + STA2X11_AMBA_SIZE - 1);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
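/*
 * Only window 0 is used: it is programmed to map the full 512 MB AMBA
 * range onto PCIe address 0, while the AMBA base itself is read back
 * from AHB_BASE(0) as already configured; the remaining windows are
 * disabled.
 */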

#ifdef CONFIG_PM		/* Some register values must be saved and restored */

static void suspend_mapping(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;

	if (map->is_suspended)
		return;
	map->is_suspended = 1;

	/* Save all window configs */
	for (i = 0; i < STA2X11_NR_FUNCS; i++) {
		struct sta2x11_ahb_regs *regs = map->regs + i;

		pci_read_config_dword(pdev, AHB_BASE(i), &regs->base);
		pci_read_config_dword(pdev, AHB_PEXLBASE(i), &regs->pexlbase);
		pci_read_config_dword(pdev, AHB_PEXHBASE(i), &regs->pexhbase);
		pci_read_config_dword(pdev, AHB_CRW(i), &regs->crw);
	}
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);

static void resume_mapping(struct pci_dev *pdev)
{
	struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
	int i;

	if (!map)
		return;

	if (!map->is_suspended)
		goto out;
	map->is_suspended = 0;

	/* Restore all window configs */
	for (i = 0; i < STA2X11_NR_FUNCS; i++) {
		struct sta2x11_ahb_regs *regs = map->regs + i;

		pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
		pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
		pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
		pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
	}
out:
	pci_set_master(pdev); /* Like at boot, enable master on all devices */
}
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);
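/*
 * The suspend/resume fixups above save and restore all eight AHB window
 * registers across a power transition; the is_suspended flag keeps a
 * repeated fixup invocation from saving or restoring the state twice.
 */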

#endif /* CONFIG_PM */