#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>

static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);

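/*
 * Per-CPU scratch slot: one kernel virtual address and the pte backing it.
 * The pte is retargeted at pages that have no local struct page (e.g.
 * grant-mapped foreign pages) so cache maintenance can be performed on them.
 */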
static int alloc_xen_mm32_scratch_page(int cpu)
{
	struct page *page;
	unsigned long virt;
	pmd_t *pmdp;
	pte_t *ptep;

	if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
		return 0;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
		return -ENOMEM;
	}

	virt = (unsigned long)__va(page_to_phys(page));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
	ptep = pte_offset_kernel(pmdp, virt);

	per_cpu(xen_mm32_scratch_virt, cpu) = virt;
	per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;

	return 0;
}

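/* Allocate a scratch page for each CPU as it is brought online. */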
static int xen_mm32_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (alloc_xen_mm32_scratch_page(cpu))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_mm32_cpu_notifier = {
	.notifier_call	= xen_mm32_cpu_notify,
};

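/*
 * Point the current CPU's scratch pte at the page behind @handle and
 * return the scratch virtual address.  get_cpu_var() disables preemption;
 * it stays disabled until xen_mm32_unmap() releases the slot with
 * put_cpu_var().
 */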
static void *xen_mm32_remap_page(dma_addr_t handle)
{
	unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
	pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);

	*ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
	local_flush_tlb_kernel_page(virt);

	return (void *)virt;
}

static void xen_mm32_unmap(void *vaddr)
{
	put_cpu_var(xen_mm32_scratch_virt);
}

/* functions called by SWIOTLB */

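/*
 * Perform the cache maintenance operation @op on the buffer described by
 * bus address @handle, @offset and @size, one page fragment at a time.
 * Highmem pages are kmapped; pages without a local struct page (foreign
 * pages) are mapped through the per-CPU scratch slot.
 */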
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;
		void *vaddr;

		/* A single mapping cannot cross a page boundary */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (!pfn_valid(pfn)) {
			/*
			 * Cannot map the page normally: there is no struct
			 * page and we don't know its physical address.  If
			 * grants are not identity-mapped, return and hope
			 * for the best.
			 */
			if (!xen_feature(XENFEAT_grant_map_identity))
				return;
			vaddr = xen_mm32_remap_page((dma_addr_t)pfn << PAGE_SHIFT) + offset;
			op(vaddr, len, dir);
			xen_mm32_unmap(vaddr - offset);
		} else {
			struct page *page = pfn_to_page(pfn);

			if (PageHighMem(page)) {
				if (cache_is_vipt_nonaliasing()) {
					vaddr = kmap_atomic(page);
					op(vaddr + offset, len, dir);
					kunmap_atomic(vaddr);
				} else {
					vaddr = kmap_high_get(page);
					if (vaddr) {
						op(vaddr + offset, len, dir);
						kunmap_high(page);
					}
				}
			} else {
				vaddr = page_address(page) + offset;
				op(vaddr, len, dir);
			}
		}

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

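/*
 * dev_to_cpu (unmap / sync-for-cpu): invalidate the outer cache first,
 * then let dmac_unmap_area handle the inner caches.
 */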
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* Cannot use __dma_page_dev_to_cpu because we don't have a
	 * struct page for handle */

	if (dir != DMA_TO_DEVICE)
		outer_inv_range(handle, handle + size);

	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
}

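/*
 * cpu_to_dev (map / sync-for-device): clean the inner caches first via
 * dmac_map_area, then clean (or invalidate, for DMA_FROM_DEVICE) the
 * outer cache.
 */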
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(handle, handle + size);
	else
		outer_clean_range(handle, handle + size);
}

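/*
 * If the native DMA ops have no corresponding callback there is nothing
 * to sync (e.g. the device is cache-coherent), so the helpers below bail
 * out early.
 */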
void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!__generic_dma_ops(hwdev)->unmap_page)
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

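/*
 * Allocate a scratch page for every CPU that is already online and
 * register a hotplug notifier for CPUs that come up later.  Only the
 * initial domain needs this.
 */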
int __init xen_mm32_init(void)
{
	int cpu;

	if (!xen_initial_domain())
		return 0;

	register_cpu_notifier(&xen_mm32_cpu_notifier);
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (alloc_xen_mm32_scratch_page(cpu)) {
			put_online_cpus();
			unregister_cpu_notifier(&xen_mm32_cpu_notifier);
			return -ENOMEM;
		}
	}
	put_online_cpus();

	return 0;
}
arch_initcall(xen_mm32_init);