/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tiocp.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"

extern int sn_ioif_inited;
/* =====================================================================
 *    DMA MANAGEMENT
 *
 *      The Bridge ASIC provides three methods of doing DMA: via a "direct map"
 *      register available in 32-bit PCI space (which selects a contiguous 2G
 *      address space on some other widget), via "direct" addressing via 64-bit
 *      PCI space (all destination information comes from the PCI address,
 *      including transfer attributes), and via a "mapped" region that allows
 *      a number of different small mappings to be established with the PMU.
 *
 *      For efficiency, we prefer the 32-bit direct mapping facility, since it
 *      requires no resource allocations.  The advantage of using the PMU over
 *      64-bit direct is that single-cycle PCI addressing can be used; the
 *      advantage of 64-bit direct over the PMU is that we do not have to
 *      allocate entries in the PMU.
 */

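/*
 * For orientation, the three methods map onto the routines below roughly as
 * follows: pcibr_dmatrans_direct64() builds 64-bit "direct" addresses,
 * pcibr_dmatrans_direct32() uses the 32-bit direct-map window, and
 * pcibr_dmamap_ate32() establishes PMU (ATE) mappings.  pcibr_dma_map(),
 * near the bottom of this file, picks among them based on the device's DMA
 * mask, trying 32-bit direct and then ATEs for non-64-bit cards.
 */
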
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
		   u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
	    pdi_pcibus_info;
	u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
					    pdi_linux_pcidev->devfn)) - 1;
	int ate_count;
	int ate_index;
	u64 ate_flags = flags | PCI32_ATE_V;
	u64 ate;
	u64 pci_addr;
	u64 xio_addr;
	u64 offset;

	/* PIC in PCI-X mode does not support 32-bit PageMap mode */
	if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
		return 0;
	}

	/* Calculate the number of ATEs needed. */
	if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
		ate_count = IOPG((IOPGSIZE - 1)	/* worst case start offset */
				 + req_size	/* max mapping bytes */
				 - 1) + 1;	/* round UP */
	} else {		/* assume requested target is page aligned */
		ate_count = IOPG(req_size	/* max mapping bytes */
				 - 1) + 1;	/* round UP */
	}

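	/*
	 * Worked example (illustrative only; assumes IOPG() yields the I/O
	 * page number of a byte offset and IOPGSIZE is the I/O page size,
	 * e.g. 16KB): an unaligned 16KB request may start at offset
	 * IOPGSIZE - 1 within a page, so
	 *	ate_count = IOPG((IOPGSIZE - 1) + 16384 - 1) + 1 = 2,
	 * while a page-aligned 16KB request needs only
	 *	ate_count = IOPG(16384 - 1) + 1 = 1.
	 */
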
	/* Allocate the ATEs. */
	ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
	if (ate_index < 0)
		return 0;

	/* In PCI-X mode, prefetch is not supported */
	if (IS_PCIX(pcibus_info))
		ate_flags &= ~(PCI32_ATE_PREF);

	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
						      PHYS_TO_TIODMA(paddr);
	else
		xio_addr = paddr;

	offset = IOPGOFF(xio_addr);
	ate = ate_flags | (xio_addr - offset);

	/* If PIC, put the targetid in the ATE */
	if (IS_PIC_SOFT(pcibus_info)) {
		ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
	}

	/*
	 * If we're mapping for MSI, set the MSI bit in the ATE.  If it's a
	 * TIOCP based pci bus, we also need to set the PIO bit in the ATE.
	 */
	if (dma_flags & SN_DMA_MSI) {
		ate |= PCI32_ATE_MSI;
		if (IS_TIOCP_SOFT(pcibus_info))
			ate |= PCI32_ATE_PIO;
	}

	ate_write(pcibus_info, ate_index, ate_count, ate);

	/*
	 * Set up the DMA mapped Address.
	 */
	pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;

	/*
	 * If swap was set in device in pcibr_endian_set()
	 * we need to turn swapping on.
	 */
	if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
		ATE_SWAP_ON(pci_addr);

	return pci_addr;
}

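/*
 * The handle returned by pcibr_dmamap_ate32() encodes everything needed to
 * tear the mapping down again: pcibr_dma_unmap() below recovers the ATE
 * index as IOPG(ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE) and releases
 * the ATEs with pcibr_ate_free().
 */
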
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
			u64 dma_attributes, int dma_flags)
{
	struct pcibus_info *pcibus_info = (struct pcibus_info *)
	    ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
	u64 pci_addr;

	/* Translate to Crosstalk View of Physical Address */
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		pci_addr = IS_PIC_SOFT(pcibus_info) ?
				PHYS_TO_DMA(paddr) :
				PHYS_TO_TIODMA(paddr);
	else
		pci_addr = paddr;
	pci_addr |= dma_attributes;

	/* Handle Bus mode */
	if (IS_PCIX(pcibus_info))
		pci_addr &= ~PCI64_ATTR_PREF;

	/* Handle Bridge Chipset differences */
	if (IS_PIC_SOFT(pcibus_info)) {
		pci_addr |=
		    ((u64) pcibus_info->
		     pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
	} else
		pci_addr |= (dma_flags & SN_DMA_MSI) ?
				TIOCP_PCI64_CMDTYPE_MSI :
				TIOCP_PCI64_CMDTYPE_MEM;

	/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
	if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
		pci_addr |= PCI64_ATTR_VIRTUAL;

	return pci_addr;
}

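/*
 * Illustrative sketch (not a new code path): for a PIC-based bus and an
 * SN_DMA_ADDR_PHYS request, the handle produced above is effectively
 *
 *	PHYS_TO_DMA(paddr) | dma_attributes
 *		| (pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT)
 *		| (PCI64_ATTR_VIRTUAL for a non-zero function in PCI mode)
 *
 * i.e. routing and transfer attributes ride in the 64-bit PCI address
 * itself, which is why this path consumes no ATE resources.
 */
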
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
			u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
	struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
	struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
	    pdi_pcibus_info;
	u64 xio_addr;
	u64 xio_base;
	u64 offset;
	u64 endoff;

	if (IS_PCIX(pcibus_info)) {
		return 0;
	}

	if (dma_flags & SN_DMA_MSI)
		return 0;

	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
						      PHYS_TO_TIODMA(paddr);
	else
		xio_addr = paddr;

	xio_base = pcibus_info->pbi_dir_xbase;
	offset = xio_addr - xio_base;
	endoff = req_size + offset;
	if ((req_size > (1ULL << 31)) ||	/* Too Big */
	    (xio_addr < xio_base) ||	/* Out of range for mappings */
	    (endoff > (1ULL << 31))) {	/* Too Big */
		return 0;
	}

	return PCI32_DIRECT_BASE | offset;
}

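/*
 * Illustrative example (assuming pbi_dir_xbase is the XIO base of the
 * bridge's 2G direct-map window described in the header comment): a request
 * is honored only if it lies entirely within the 2G window starting at
 * xio_base, i.e. xio_addr >= xio_base and offset + req_size <= 2G.
 * Otherwise this routine returns 0 and pcibr_dma_map() falls back to an
 * ATE mapping via pcibr_dmamap_ate32().
 */
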
/*
 * Wrapper routine for freeing DMA maps.
 * Direct 64 and direct 32 mappings do not have any DMA maps to free.
 */
void
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
	struct pcibus_info *pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_pcibus_info;

	if (IS_PCI32_MAPPED(dma_handle)) {
		int ate_index;

		ate_index =
		    IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
		pcibr_ate_free(pcibus_info, ate_index);
	}
}

/*
 * On SN systems there is a race condition between a PIO read response and
 * DMAs.  In rare cases, the read response may beat the DMA, causing the
 * driver to think that data in memory is complete and meaningful.  This code
 * eliminates that race.  This routine is called by the PIO read routines
 * after doing the read.  For PIC this routine then forces a fake interrupt
 * on another line, which is logically associated with the slot that the PIO
 * is addressed to.  It then spins while watching the memory location that
 * the interrupt is targeted to.  When the interrupt response arrives, we
 * are sure that the DMA has landed in memory and it is safe for the driver
 * to proceed.  For TIOCP, use the Device(x) Write Request Buffer Flush
 * Bridge register, since it ensures the data has entered the coherence
 * domain, unlike the PIC Device(x) Write Request Buffer Flush register.
 */

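/*
 * Illustrative usage sketch (hypothetical driver fragment, not part of this
 * file; on SN the flush is normally issued from the arch PIO read helpers
 * rather than by drivers directly):
 *
 *	status = readl(ring->status_reg);	 // PIO read of a status reg
 *	sn_dma_flush((u64) ring->status_reg);	 // make sure prior DMA landed
 *	if (status & DESC_DONE)
 *		process_buffer(ring);		 // buffer contents now valid
 */
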
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (!TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start
			    && addr <= common->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
			break;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		/*
		 * Note:  devices behind TIOCE should never be matched in the
		 * above code, and so the following code is PIC/CP centric.
		 * If CE ever needs the sn_dma_flush mechanism, we will have
		 * to account for that here and in tioce_bus_fixup().
		 */
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}

/*
 * DMA interfaces.  Called from pci_dma.c routines.
 */

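/*
 * Rough wiring sketch (see pci_dma.c and the bus provider registration for
 * the authoritative code): the generic SN DMA routines reach these entry
 * points through per-bus provider operations, approximately
 *
 *	dma_addr = provider->dma_map(hwdev, __pa(cpu_addr), size,
 *				     SN_DMA_ADDR_PHYS);
 *	...
 *	provider->dma_unmap(hwdev, dma_addr, direction);
 *
 * with the Bridge/PIC provider pointing those hooks at pcibr_dma_map(),
 * pcibr_dma_map_consistent() and pcibr_dma_unmap().
 */
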
dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
	dma_addr_t dma_handle;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);

	/* SN cannot support DMA addresses smaller than 32 bits. */
	if (hwdev->dma_mask < 0x7fffffff) {
		return 0;
	}

	if (hwdev->dma_mask == ~0UL) {
		/*
		 * Handle the most common case: 64 bit cards.  This
		 * call should always succeed.
		 */

		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
						     PCI64_ATTR_PREF, dma_flags);
	} else {
		/* Handle 32-63 bit cards via direct mapping */
		dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
						     size, 0, dma_flags);
		if (!dma_handle) {
			/*
			 * It is a 32 bit card and we cannot do direct mapping,
			 * so we use an ATE.
			 */

			dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
							size, PCI32_ATE_PREF,
							dma_flags);
		}
	}

	return dma_handle;
}

dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
			 size_t size, int dma_flags)
{
	dma_addr_t dma_handle;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);

	if (hwdev->dev.coherent_dma_mask == ~0UL) {
		dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
					    PCI64_ATTR_BAR, dma_flags);
	} else {
		dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
						    phys_addr, size,
						    PCI32_ATE_BAR, dma_flags);
	}

	return dma_handle;
}

EXPORT_SYMBOL(sn_dma_flush);