/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. So far there are a few corner cases that, fortunately, we
 * have not had to handle here.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, unsigned *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

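	/* Look up and validate the VMA under the mmap_sem read lock. */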
	down_read(&mm->mmap_sem);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;

	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}

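	/*
	 * DSISR_ISSTORE distinguishes store faults from loads/instruction
	 * fetches; check that the VMA permits the access type.
	 */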
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * protfault should only happen due to us
		 * mapping a region readonly temporarily. PROT_NONE
		 * is also covered by the VMA check above.
		 */
		WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

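	/*
	 * Fault the page in; the VM_FAULT_* result from handle_mm_fault()
	 * is returned in *flt so the caller can see what happened.
	 */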
	ret = 0;
	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

	if (*flt & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

out_unlock:
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

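/*
 * Work out the SLB entry (ESID/VSID pair) a coprocessor needs in order to
 * access the effective address ea on behalf of mm, covering user, vmalloc
 * and kernel linear-mapping addresses.
 */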
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}

	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

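/*
 * Invalidate the SLB entries cached by all coprocessors (SPUs and cxl
 * AFUs) on behalf of this mm.
 */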
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);