Searched refs:mm (Results 1 – 25 of 731) sorted by relevance


/arch/m68k/include/asm/
mmu_context.h
8 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
32 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
36 if (mm->context != NO_CONTEXT) in get_mmu_context()
49 mm->context = ctx; in get_mmu_context()
50 context_mm[ctx] = mm; in get_mmu_context()
56 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
61 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
63 if (mm->context != NO_CONTEXT) { in destroy_context()
64 clear_bit(mm->context, context_map); in destroy_context()
65 mm->context = NO_CONTEXT; in destroy_context()
[all …]
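
The m68k block above is the simplest context scheme in these results: a context number is just a bit in a global map, claimed on first use by get_mmu_context() and released in destroy_context(). Below is a hypothetical userspace reduction of that pattern; NO_CONTEXT, LAST_CONTEXT and context_map echo the snippet, while the linear bit scan and the cut-down struct mm are stand-ins invented for illustration (the kernel uses find_first_zero_bit()).

    #include <stdio.h>

    #define NO_CONTEXT    (-1L)
    #define LAST_CONTEXT  255L              /* assumed size of the context space */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long context_map[(LAST_CONTEXT + 1) / BITS_PER_LONG + 1];

    struct mm { long context; };

    static long alloc_context(void)
    {
            for (long ctx = 0; ctx <= LAST_CONTEXT; ctx++) {
                    unsigned long *word = &context_map[ctx / BITS_PER_LONG];
                    unsigned long bit  = 1UL << (ctx % BITS_PER_LONG);
                    if (!(*word & bit)) {
                            *word |= bit;   /* claim this context number */
                            return ctx;
                    }
            }
            return NO_CONTEXT;              /* exhausted; a real kernel steals one */
    }

    static void get_mmu_context(struct mm *mm)
    {
            if (mm->context != NO_CONTEXT)  /* already assigned: nothing to do */
                    return;
            mm->context = alloc_context();
    }

    static void destroy_context(struct mm *mm)
    {
            if (mm->context != NO_CONTEXT) {
                    context_map[mm->context / BITS_PER_LONG] &=
                            ~(1UL << (mm->context % BITS_PER_LONG));
                    mm->context = NO_CONTEXT;
            }
    }

    int main(void)
    {
            struct mm a = { NO_CONTEXT }, b = { NO_CONTEXT };
            get_mmu_context(&a);
            get_mmu_context(&b);
            printf("a=%ld b=%ld\n", a.context, b.context);  /* a=0 b=1 */
            destroy_context(&a);
            return 0;
    }
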
/arch/s390/include/asm/
mmu_context.h
19 struct mm_struct *mm) in init_new_context() argument
21 spin_lock_init(&mm->context.lock); in init_new_context()
22 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
23 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
24 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
25 atomic_set(&mm->context.flush_count, 0); in init_new_context()
26 mm->context.gmap_asce = 0; in init_new_context()
27 mm->context.flush_mm = 0; in init_new_context()
29 mm->context.alloc_pgste = page_table_allocate_pgste || in init_new_context()
31 (current->mm && current->mm->context.alloc_pgste); in init_new_context()
[all …]
pgalloc.h
25 struct page *page_table_alloc_pgste(struct mm_struct *mm);
51 static inline unsigned long pgd_entry_type(struct mm_struct *mm) in pgd_entry_type() argument
53 if (mm->context.asce_limit <= _REGION3_SIZE) in pgd_entry_type()
55 if (mm->context.asce_limit <= _REGION2_SIZE) in pgd_entry_type()
57 if (mm->context.asce_limit <= _REGION1_SIZE) in pgd_entry_type()
62 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
65 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) in p4d_alloc_one() argument
67 unsigned long *table = crst_table_alloc(mm); in p4d_alloc_one()
74 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) in p4d_free() argument
76 if (!mm_p4d_folded(mm)) in p4d_free()
[all …]
tlbflush.h
52 static inline void __tlb_flush_mm(struct mm_struct *mm) in __tlb_flush_mm() argument
62 atomic_inc(&mm->context.flush_count); in __tlb_flush_mm()
64 cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); in __tlb_flush_mm()
66 gmap_asce = READ_ONCE(mm->context.gmap_asce); in __tlb_flush_mm()
70 __tlb_flush_idte(mm->context.asce); in __tlb_flush_mm()
75 atomic_dec(&mm->context.flush_count); in __tlb_flush_mm()
92 static inline void __tlb_flush_mm(struct mm_struct *mm) in __tlb_flush_mm() argument
103 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) in __tlb_flush_mm_lazy() argument
105 spin_lock(&mm->context.lock); in __tlb_flush_mm_lazy()
106 if (mm->context.flush_mm) { in __tlb_flush_mm_lazy()
[all …]
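
The tlbflush.h matches above show the lazy variant: a flush is deferred by setting mm->context.flush_mm, and __tlb_flush_mm_lazy() only does the work if that flag is still pending when the mm is next looked at. A minimal sketch of that idea, with a pthread mutex standing in for spin_lock() and printf for the actual IDTE/IPTE instructions:

    #include <pthread.h>
    #include <stdio.h>

    struct mm_context {
            pthread_mutex_t lock;
            int flush_mm;                   /* a flush was requested but deferred */
    };

    static void __tlb_flush_mm(struct mm_context *ctx)
    {
            printf("flush every TLB entry for this address space\n");
    }

    static void __tlb_flush_mm_lazy(struct mm_context *ctx)
    {
            pthread_mutex_lock(&ctx->lock);
            if (ctx->flush_mm) {            /* still pending: do it exactly once */
                    __tlb_flush_mm(ctx);
                    ctx->flush_mm = 0;
            }
            pthread_mutex_unlock(&ctx->lock);
    }

    int main(void)
    {
            static struct mm_context ctx = { PTHREAD_MUTEX_INITIALIZER, 1 };
            __tlb_flush_mm_lazy(&ctx);      /* flushes: a flush was pending */
            __tlb_flush_mm_lazy(&ctx);      /* no-op: flag already cleared */
            return 0;
    }
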
/arch/x86/include/asm/
mmu_context.h
29 static inline void load_mm_cr4(struct mm_struct *mm) in load_mm_cr4() argument
32 atomic_read(&mm->context.perf_rdpmc_allowed)) in load_mm_cr4()
38 static inline void load_mm_cr4(struct mm_struct *mm) {} in load_mm_cr4() argument
84 static inline void init_new_context_ldt(struct mm_struct *mm) in init_new_context_ldt() argument
86 mm->context.ldt = NULL; in init_new_context_ldt()
87 init_rwsem(&mm->context.ldt_usr_sem); in init_new_context_ldt()
89 int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
90 void destroy_context_ldt(struct mm_struct *mm);
91 void ldt_arch_exit_mmap(struct mm_struct *mm);
93 static inline void init_new_context_ldt(struct mm_struct *mm) { } in init_new_context_ldt() argument
[all …]
pkeys.h
16 extern int __execute_only_pkey(struct mm_struct *mm);
17 static inline int execute_only_pkey(struct mm_struct *mm) in execute_only_pkey() argument
22 return __execute_only_pkey(mm); in execute_only_pkey()
41 #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map) argument
42 #define mm_set_pkey_allocated(mm, pkey) do { \ argument
43 mm_pkey_allocation_map(mm) |= (1U << pkey); \
45 #define mm_set_pkey_free(mm, pkey) do { \ argument
46 mm_pkey_allocation_map(mm) &= ~(1U << pkey); \
50 bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) in mm_pkey_is_allocated() argument
66 if (pkey == mm->context.execute_only_pkey) in mm_pkey_is_allocated()
[all …]
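
The pkeys.h match above is plain bit bookkeeping: each mm keeps one word in which bit N means protection key N is allocated, and the execute-only key counts as allocated implicitly. A small self-contained sketch of those macros; the struct here is a cut-down stand-in, not the real mm_struct:

    #include <stdbool.h>
    #include <stdio.h>

    struct mm {
            unsigned short pkey_allocation_map;     /* bit N set => pkey N in use */
            int execute_only_pkey;                  /* -1 while not assigned */
    };

    #define mm_set_pkey_allocated(mm, pkey) ((mm)->pkey_allocation_map |= (1U << (pkey)))
    #define mm_set_pkey_free(mm, pkey)      ((mm)->pkey_allocation_map &= ~(1U << (pkey)))

    static bool mm_pkey_is_allocated(const struct mm *mm, int pkey)
    {
            if (pkey == mm->execute_only_pkey)      /* implicitly allocated */
                    return true;
            return mm->pkey_allocation_map & (1U << pkey);
    }

    int main(void)
    {
            struct mm mm = { 1U << 0, -1 };         /* pkey 0 is always taken */
            mm_set_pkey_allocated(&mm, 3);
            printf("%d\n", mm_pkey_is_allocated(&mm, 3));   /* 1 */
            mm_set_pkey_free(&mm, 3);
            printf("%d\n", mm_pkey_is_allocated(&mm, 3));   /* 0 */
            return 0;
    }
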
pgalloc.h
9 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } in __paravirt_pgd_alloc() argument
14 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) argument
15 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} in paravirt_pgd_free() argument
16 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pte() argument
17 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pmd() argument
20 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pud() argument
21 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_p4d() argument
48 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
56 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
62 static inline void pte_free(struct mm_struct *mm, struct page *pte) in pte_free() argument
[all …]
/arch/sparc/include/asm/
mmu_context_64.h
17 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
26 void get_new_mmu_context(struct mm_struct *mm);
27 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
28 void destroy_context(struct mm_struct *mm);
36 static inline void tsb_context_switch_ctx(struct mm_struct *mm, in tsb_context_switch_ctx() argument
39 __tsb_context_switch(__pa(mm->pgd), in tsb_context_switch_ctx()
40 &mm->context.tsb_block[MM_TSB_BASE], in tsb_context_switch_ctx()
42 (mm->context.tsb_block[MM_TSB_HUGE].tsb ? in tsb_context_switch_ctx()
43 &mm->context.tsb_block[MM_TSB_HUGE] : in tsb_context_switch_ctx()
48 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]), in tsb_context_switch_ctx()
[all …]
/arch/arm/include/asm/
mmu_context.h
27 void __check_vmalloc_seq(struct mm_struct *mm);
31 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
33 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
35 atomic64_set(&mm->context.id, 0); in init_new_context()
40 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
43 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
53 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
56 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context()
57 __check_vmalloc_seq(mm); in check_and_switch_context()
67 mm->context.switch_pending = 1; in check_and_switch_context()
[all …]
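
The ARM matches above use a generation counter: init_mm.context.vmalloc_seq is bumped whenever the master kernel page tables change, and each mm compares its own copy on context switch, catching up only when it is behind. A sketch of that check, with the page-table copy reduced to a comment:

    #include <stdio.h>

    static long init_mm_vmalloc_seq;        /* bumped when kernel mappings change */

    struct mm { long vmalloc_seq; };

    static void check_vmalloc_seq(struct mm *mm)
    {
            /* copy the kernel (vmalloc) part of the page tables into this
             * mm's pgd, then record which generation was copied */
            mm->vmalloc_seq = init_mm_vmalloc_seq;
    }

    static void check_and_switch_context(struct mm *mm)
    {
            if (mm->vmalloc_seq != init_mm_vmalloc_seq)     /* stale: catch up */
                    check_vmalloc_seq(mm);
            /* ...then continue with the actual ASID/page-table switch */
    }

    int main(void)
    {
            struct mm mm = { 0 };
            init_mm_vmalloc_seq++;          /* pretend a vmalloc mapping changed */
            check_and_switch_context(&mm);
            printf("caught up to generation %ld\n", mm.vmalloc_seq);  /* 1 */
            return 0;
    }
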
/arch/s390/mm/
pgalloc.c
55 unsigned long *crst_table_alloc(struct mm_struct *mm) in crst_table_alloc() argument
65 void crst_table_free(struct mm_struct *mm, unsigned long *table) in crst_table_free() argument
72 struct mm_struct *mm = arg; in __crst_table_upgrade() local
74 if (current->active_mm == mm) { in __crst_table_upgrade()
76 set_user_asce(mm); in __crst_table_upgrade()
81 int crst_table_upgrade(struct mm_struct *mm, unsigned long end) in crst_table_upgrade() argument
87 VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); in crst_table_upgrade()
90 while (mm->context.asce_limit < end) { in crst_table_upgrade()
91 table = crst_table_alloc(mm); in crst_table_upgrade()
96 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
[all …]
pgtable.c
28 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
35 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
39 asce = asce ? : mm->context.asce; in ptep_ipte_local()
48 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
55 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
59 asce = asce ? : mm->context.asce; in ptep_ipte_global()
68 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
77 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
79 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
80 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
[all …]
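
ptep_flush_direct() above picks between a cheap local flush and a broadcast one by asking whether this CPU is the only one that ever attached the mm. With mm_cpumask reduced to one word, the decision looks roughly like this; the flush_count increment and machine-feature checks of the real code are omitted:

    #include <stdio.h>

    /* one bit per CPU: which CPUs have ever attached this address space */
    static void ptep_flush(unsigned long mm_cpumask, int this_cpu)
    {
            if (mm_cpumask == (1UL << this_cpu))
                    printf("local flush: only this CPU holds entries\n");
            else
                    printf("global flush: broadcast to all attached CPUs\n");
    }

    int main(void)
    {
            ptep_flush(1UL << 2, 2);                /* only CPU 2: local */
            ptep_flush((1UL << 2) | (1UL << 5), 2); /* CPU 5 too: global */
            return 0;
    }
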
/arch/m32r/include/asm/
mmu_context.h
29 #define mm_context(mm) mm->context argument
33 #define mm_context(mm) mm->context[smp_processor_id()] argument
40 #define enter_lazy_tlb(mm, tsk) do { } while (0) argument
42 static inline void get_new_mmu_context(struct mm_struct *mm) in get_new_mmu_context() argument
55 mm_context(mm) = mc; in get_new_mmu_context()
61 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
63 if (mm) { in get_mmu_context()
68 if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK) in get_mmu_context()
69 get_new_mmu_context(mm); in get_mmu_context()
78 struct mm_struct *mm) in init_new_context() argument
[all …]
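
The m32r allocator above tags each context with a version in the high bits: when the low ASID bits wrap, the version bumps, which invalidates every previously issued context in one stroke. A sketch of that scheme, with the mask widths assumed rather than taken from the real headers:

    #include <stdio.h>

    #define MMU_CONTEXT_ASID_MASK    0x000000FFUL   /* assumed 8-bit hardware ASID */
    #define MMU_CONTEXT_VERSION_MASK 0xFFFFFF00UL

    static unsigned long mmu_context_cache = MMU_CONTEXT_ASID_MASK + 1;

    static unsigned long get_new_mmu_context(void)
    {
            unsigned long mc = ++mmu_context_cache;

            if ((mc & MMU_CONTEXT_ASID_MASK) == 0) {
                    /* ASID bits wrapped into a fresh version: every context
                     * issued under the old version is now stale, so a real
                     * kernel flushes the whole TLB at this point */
            }
            return mc;
    }

    static void get_mmu_context(unsigned long *mm_context)
    {
            /* differing version bits mean the stored ASID is stale */
            if ((*mm_context ^ mmu_context_cache) & MMU_CONTEXT_VERSION_MASK)
                    *mm_context = get_new_mmu_context();
    }

    int main(void)
    {
            unsigned long ctx = 0;                  /* version 0: always stale */

            get_mmu_context(&ctx);
            printf("ctx = %#lx\n", ctx);            /* 0x101: version 1, ASID 1 */
            return 0;
    }
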
/arch/mn10300/include/asm/
mmu_context.h
39 #define enter_lazy_tlb(mm, tsk) do {} while (0) argument
41 static inline void cpu_ran_vm(int cpu, struct mm_struct *mm) in cpu_ran_vm() argument
44 cpumask_set_cpu(cpu, mm_cpumask(mm)); in cpu_ran_vm()
48 static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm) in cpu_maybe_ran_vm() argument
51 return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)); in cpu_maybe_ran_vm()
59 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()]) argument
65 static inline unsigned long allocate_mmu_context(struct mm_struct *mm) in allocate_mmu_context() argument
80 mm_context(mm) = mc; in allocate_mmu_context()
87 static inline unsigned long get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
91 if (mm) { in get_mmu_context()
[all …]
/arch/um/kernel/skas/
mmu.c
18 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, in init_stub_pte() argument
26 pgd = pgd_offset(mm, proc); in init_stub_pte()
27 pud = pud_alloc(mm, pgd, proc); in init_stub_pte()
31 pmd = pmd_alloc(mm, pud, proc); in init_stub_pte()
35 pte = pte_alloc_map(mm, pmd, proc); in init_stub_pte()
44 pmd_free(mm, pmd); in init_stub_pte()
46 pud_free(mm, pud); in init_stub_pte()
51 int init_new_context(struct task_struct *task, struct mm_struct *mm) in init_new_context() argument
54 struct mm_context *to_mm = &mm->context; in init_new_context()
63 if (current->mm != NULL && current->mm != &init_mm) in init_new_context()
[all …]
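
init_stub_pte() above is the canonical alloc-as-you-walk chain: pud_alloc(), pmd_alloc() and pte_alloc_map() each populate a missing level on demand, and the error path frees what was built. A heap-backed toy version of that walk; ENTRIES, the table type and walk_alloc() are invented for illustration, and the unwind is left as comments:

    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512                     /* assumed entries per level */
    typedef struct table { void *slot[ENTRIES]; } table;

    static void *walk_alloc(table *pgd, int i0, int i1, int i2)
    {
            table *pud, *pmd;

            if (!pgd->slot[i0] && !(pgd->slot[i0] = calloc(1, sizeof(table))))
                    return NULL;
            pud = pgd->slot[i0];

            if (!pud->slot[i1] && !(pud->slot[i1] = calloc(1, sizeof(table))))
                    return NULL;            /* real code would pud_free() here */
            pmd = pud->slot[i1];

            if (!pmd->slot[i2] && !(pmd->slot[i2] = calloc(1, sizeof(table))))
                    return NULL;            /* ...and pmd_free() here */
            return pmd->slot[i2];           /* the PTE page for this address */
    }

    int main(void)
    {
            table pgd = { { 0 } };
            printf("pte page %s\n", walk_alloc(&pgd, 0, 7, 42) ? "allocated" : "failed");
            return 0;
    }
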
/arch/sparc/mm/
tlb.c
27 struct mm_struct *mm = tb->mm; in flush_tlb_pending() local
34 if (CTX_VALID(mm->context)) { in flush_tlb_pending()
36 global_flush_tlb_page(mm, tb->vaddrs[0]); in flush_tlb_pending()
39 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, in flush_tlb_pending()
42 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), in flush_tlb_pending()
70 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, in tlb_batch_add_one() argument
82 if (unlikely(nr != 0 && mm != tb->mm)) { in tlb_batch_add_one()
88 flush_tsb_user_page(mm, vaddr, hugepage_shift); in tlb_batch_add_one()
89 global_flush_tlb_page(mm, vaddr); in tlb_batch_add_one()
94 tb->mm = mm; in tlb_batch_add_one()
[all …]
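
tlb_batch_add_one() above queues virtual addresses and drains the queue either when it fills or when a different mm shows up, so one flush covers many pages. A compact model of that batching; TLB_BATCH_NR's value and the printf flush are stand-ins:

    #include <stdio.h>

    #define TLB_BATCH_NR 8                  /* assumed batch capacity */

    struct tlb_batch {
            void *mm;                       /* owner of the queued addresses */
            unsigned long vaddrs[TLB_BATCH_NR];
            unsigned int tlb_nr;
    };

    static void flush_tlb_pending(struct tlb_batch *tb)
    {
            printf("flushing %u entries for mm %p\n", tb->tlb_nr, tb->mm);
            tb->tlb_nr = 0;
    }

    static void tlb_batch_add_one(struct tlb_batch *tb, void *mm, unsigned long vaddr)
    {
            if (tb->tlb_nr != 0 && tb->mm != mm)
                    flush_tlb_pending(tb);  /* different mm: drain first */
            tb->mm = mm;
            tb->vaddrs[tb->tlb_nr++] = vaddr;
            if (tb->tlb_nr == TLB_BATCH_NR)
                    flush_tlb_pending(tb);  /* batch full: drain now */
    }

    int main(void)
    {
            struct tlb_batch tb = { 0 };
            int mm_a, mm_b;                 /* two dummy address spaces */

            tlb_batch_add_one(&tb, &mm_a, 0x1000);
            tlb_batch_add_one(&tb, &mm_a, 0x2000);
            tlb_batch_add_one(&tb, &mm_b, 0x3000); /* drains mm_a's two entries */
            flush_tlb_pending(&tb);                /* drain the remainder */
            return 0;
    }
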
/arch/alpha/include/asm/
tlbflush.h
22 ev4_flush_tlb_current(struct mm_struct *mm) in ev4_flush_tlb_current() argument
24 __load_new_mm_context(mm); in ev4_flush_tlb_current()
29 ev5_flush_tlb_current(struct mm_struct *mm) in ev5_flush_tlb_current() argument
31 __load_new_mm_context(mm); in ev5_flush_tlb_current()
39 ev4_flush_tlb_current_page(struct mm_struct * mm, in ev4_flush_tlb_current_page() argument
45 __load_new_mm_context(mm); in ev4_flush_tlb_current_page()
52 ev5_flush_tlb_current_page(struct mm_struct * mm, in ev5_flush_tlb_current_page() argument
57 __load_new_mm_context(mm); in ev5_flush_tlb_current_page()
90 flush_tlb_other(struct mm_struct *mm) in flush_tlb_other() argument
92 unsigned long *mmc = &mm->context[smp_processor_id()]; in flush_tlb_other()
[all …]
/arch/powerpc/include/asm/
mmu_context.h
17 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
18 extern void destroy_context(struct mm_struct *mm);
23 extern bool mm_iommu_preregistered(struct mm_struct *mm);
24 extern long mm_iommu_get(struct mm_struct *mm,
27 extern long mm_iommu_put(struct mm_struct *mm,
29 extern void mm_iommu_init(struct mm_struct *mm);
30 extern void mm_iommu_cleanup(struct mm_struct *mm);
31 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
34 struct mm_struct *mm, unsigned long ua, unsigned long size);
35 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
[all …]
/arch/powerpc/mm/
mmu_context_book3s64.c
87 static int hash__init_new_context(struct mm_struct *mm) in hash__init_new_context() argument
99 if (!mm->context.addr_limit) in hash__init_new_context()
100 mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; in hash__init_new_context()
116 if (mm->context.id == 0) in hash__init_new_context()
117 slice_set_user_psize(mm, mmu_virtual_psize); in hash__init_new_context()
119 subpage_prot_init_new_context(mm); in hash__init_new_context()
124 static int radix__init_new_context(struct mm_struct *mm) in radix__init_new_context() argument
138 process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); in radix__init_new_context()
148 mm->context.npu_context = NULL; in radix__init_new_context()
153 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
[all …]
slice.c
97 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, in slice_area_is_free() argument
102 if ((mm->context.addr_limit - len) < addr) in slice_area_is_free()
104 vma = find_vma(mm, addr); in slice_area_is_free()
108 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
110 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
114 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
127 return !slice_area_is_free(mm, start, end - start); in slice_high_has_vma()
130 static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) in slice_mask_for_free() argument
139 if (!slice_low_has_vma(mm, i)) in slice_mask_for_free()
142 if (mm->context.addr_limit <= SLICE_LOW_TOP) in slice_mask_for_free()
[all …]
mmap.c
107 struct mm_struct *mm = current->mm; in radix__arch_get_unmapped_area() local
124 if (unlikely(addr > mm->context.addr_limit && in radix__arch_get_unmapped_area()
125 mm->context.addr_limit != TASK_SIZE)) in radix__arch_get_unmapped_area()
126 mm->context.addr_limit = TASK_SIZE; in radix__arch_get_unmapped_area()
133 vma = find_vma(mm, addr); in radix__arch_get_unmapped_area()
141 info.low_limit = mm->mmap_base; in radix__arch_get_unmapped_area()
156 struct mm_struct *mm = current->mm; in radix__arch_get_unmapped_area_topdown() local
173 if (unlikely(addr > mm->context.addr_limit && in radix__arch_get_unmapped_area_topdown()
174 mm->context.addr_limit != TASK_SIZE)) in radix__arch_get_unmapped_area_topdown()
175 mm->context.addr_limit = TASK_SIZE; in radix__arch_get_unmapped_area_topdown()
[all …]
/arch/x86/mm/
mpx.c
25 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) in mpx_bd_size_bytes() argument
27 if (is_64bit_mm(mm)) in mpx_bd_size_bytes()
33 static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm) in mpx_bt_size_bytes() argument
35 if (is_64bit_mm(mm)) in mpx_bt_size_bytes()
47 struct mm_struct *mm = current->mm; in mpx_mmap() local
51 if (len != mpx_bt_size_bytes(mm)) in mpx_mmap()
54 down_write(&mm->mmap_sem); in mpx_mmap()
57 up_write(&mm->mmap_sem); in mpx_mmap()
343 struct mm_struct *mm = current->mm; in mpx_enable_management() local
358 down_write(&mm->mmap_sem); in mpx_enable_management()
[all …]
/arch/powerpc/include/asm/book3s/64/
pgalloc.h
51 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) in radix__pgd_alloc() argument
54 return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP)); in radix__pgd_alloc()
57 page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL), in radix__pgd_alloc()
65 static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd) in radix__pgd_free() argument
74 static inline pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
79 return radix__pgd_alloc(mm); in pgd_alloc()
82 pgtable_gfp_flags(mm, GFP_KERNEL)); in pgd_alloc()
88 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
91 return radix__pgd_free(mm, pgd); in pgd_free()
95 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) in pgd_populate() argument
[all …]
/arch/microblaze/include/asm/
mmu_context_mm.h
38 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
83 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
87 if (mm->context != NO_CONTEXT) in get_mmu_context()
98 mm->context = ctx; in get_mmu_context()
99 context_mm[ctx] = mm; in get_mmu_context()
105 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
110 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
112 if (mm->context != NO_CONTEXT) { in destroy_context()
113 clear_bit(mm->context, context_map); in destroy_context()
114 mm->context = NO_CONTEXT; in destroy_context()
[all …]
/arch/cris/mm/
tlb.c
39 alloc_context(struct mm_struct *mm) in alloc_context() argument
43 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); in alloc_context()
60 mm->context.page_id = map_replace_ptr; in alloc_context()
61 page_id_map[map_replace_ptr] = mm; in alloc_context()
74 get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
76 if(mm->context.page_id == NO_CONTEXT) in get_mmu_context()
77 alloc_context(mm); in get_mmu_context()
89 destroy_context(struct mm_struct *mm) in destroy_context() argument
91 if(mm->context.page_id != NO_CONTEXT) { in destroy_context()
92 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); in destroy_context()
[all …]
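
cris has so few hardware page_ids that alloc_context() above simply evicts whoever owns the next slot, round-robin, via the map_replace_ptr cursor. A toy version; NUM_PAGEIDS is assumed, and reserving slot 0 for the kernel is an assumption of this sketch:

    #include <stdio.h>

    #define NUM_PAGEIDS 64                  /* assumed hardware page_id space */
    #define NO_CONTEXT  (-1)

    struct mm { int page_id; };

    static struct mm *page_id_map[NUM_PAGEIDS];
    static int map_replace_ptr = 1;         /* round-robin cursor; 0 reserved */

    static void alloc_context(struct mm *mm)
    {
            struct mm *old = page_id_map[map_replace_ptr];

            if (old)
                    old->page_id = NO_CONTEXT;      /* evict the current owner */
            /* a real kernel flushes the evicted page_id from the TLB here */
            mm->page_id = map_replace_ptr;
            page_id_map[map_replace_ptr] = mm;
            if (++map_replace_ptr == NUM_PAGEIDS)
                    map_replace_ptr = 1;            /* wrap, skipping slot 0 */
    }

    static void get_mmu_context(struct mm *mm)
    {
            if (mm->page_id == NO_CONTEXT)
                    alloc_context(mm);
    }

    int main(void)
    {
            struct mm a = { NO_CONTEXT };

            get_mmu_context(&a);
            printf("page_id = %d\n", a.page_id);    /* 1 */
            return 0;
    }
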
/arch/unicore32/include/asm/
mmu_context.h
24 #define init_new_context(tsk, mm) 0 argument
26 #define destroy_context(mm) do { } while (0) argument
38 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) in enter_lazy_tlb() argument
58 #define deactivate_mm(tsk, mm) do { } while (0) argument
68 #define arch_exit_mmap(mm) \ argument
70 struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
76 mm->mmap = NULL; \
77 rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
78 vmacache_invalidate(mm); \
79 mm->map_count--; \
[all …]
