
Lines Matching refs:tlb

16 static bool tlb_next_batch(struct mmu_gather *tlb)  in tlb_next_batch()  argument
20 batch = tlb->active; in tlb_next_batch()
22 tlb->active = batch->next; in tlb_next_batch()
26 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
33 tlb->batch_count++; in tlb_next_batch()
38 tlb->active->next = batch; in tlb_next_batch()
39 tlb->active = batch; in tlb_next_batch()
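Note: the fragments above (lines 16-39) show how the gather grows its chain of batch pages: reuse a batch left over from an earlier flush if one exists, otherwise allocate a new one, and refuse to grow past MAX_GATHER_BATCH_COUNT so the caller is forced to flush instead. A minimal userspace model of that logic, with made-up names and sizes (gather_batch, next_batch, MAX_BATCH_COUNT, BATCH_SLOTS) standing in for the kernel's page-sized batches:

#include <stdbool.h>
#include <stdlib.h>

#define MAX_BATCH_COUNT	8	/* stand-in for MAX_GATHER_BATCH_COUNT */
#define BATCH_SLOTS	64	/* stand-in for the per-batch slot count */

struct gather_batch {
	struct gather_batch *next;
	unsigned int nr, max;
	void *pages[BATCH_SLOTS];
};

struct gather {
	struct gather_batch *active;
	unsigned int batch_count;
	struct gather_batch local;	/* embedded first batch, like tlb->local */
};

/* Grow the chain when the active batch is full. */
static bool next_batch(struct gather *g)
{
	struct gather_batch *batch = g->active;

	/* Reuse a batch kept around from an earlier flush, if any. */
	if (batch->next) {
		g->active = batch->next;
		return true;
	}

	/* Refuse to grow without bound; the caller must flush instead. */
	if (g->batch_count == MAX_BATCH_COUNT)
		return false;

	batch = calloc(1, sizeof(*batch));
	if (!batch)
		return false;

	g->batch_count++;
	batch->max = BATCH_SLOTS;
	g->active->next = batch;
	g->active = batch;
	return true;
}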
44 static void tlb_batch_pages_flush(struct mmu_gather *tlb) in tlb_batch_pages_flush() argument
48 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_batch_pages_flush()
52 tlb->active = &tlb->local; in tlb_batch_pages_flush()
55 static void tlb_batch_list_free(struct mmu_gather *tlb) in tlb_batch_list_free() argument
59 for (batch = tlb->local.next; batch; batch = next) { in tlb_batch_list_free()
63 tlb->local.next = NULL; in tlb_batch_list_free()
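Note: lines 44-63 separate two jobs. The pages flush releases everything queued so far and rewinds the active pointer to the embedded batch, but keeps the extra batch pages for reuse within the same gather; the list free tears down the chain itself and runs only once, at the end. Continuing the userspace model above (the kernel hands the pages to free_pages_and_swap_cache() where the placeholder comment sits):

static void batch_pages_flush(struct gather *g)
{
	struct gather_batch *batch;

	for (batch = &g->local; batch && batch->nr; batch = batch->next) {
		/* release batch->pages[0..nr-1] here */
		batch->nr = 0;
	}
	g->active = &g->local;	/* refill from the embedded batch again */
}

static void batch_list_free(struct gather *g)
{
	struct gather_batch *batch, *next;

	for (batch = g->local.next; batch; batch = next) {
		next = batch->next;
		free(batch);
	}
	g->local.next = NULL;
}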
66 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) in __tlb_remove_page_size() argument
70 VM_BUG_ON(!tlb->end); in __tlb_remove_page_size()
73 VM_WARN_ON(tlb->page_size != page_size); in __tlb_remove_page_size()
76 batch = tlb->active; in __tlb_remove_page_size()
83 if (!tlb_next_batch(tlb)) in __tlb_remove_page_size()
85 batch = tlb->active; in __tlb_remove_page_size()
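Note: __tlb_remove_page_size() (lines 66-85) queues one page into the active batch after asserting that the gather has a pending range (tlb->end) and a consistent page size. Its return value is the interesting part: true means the batches are exhausted and the caller must flush before retrying. The same idea in the model above:

/* Queue one page; a true return tells the caller a flush is required. */
static bool remove_page(struct gather *g, void *page)
{
	struct gather_batch *batch = g->active;

	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!next_batch(g))
			return true;	/* out of batch space */
	}
	return false;
}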
103 static inline void tlb_table_invalidate(struct mmu_gather *tlb) in tlb_table_invalidate() argument
111 tlb_flush_mmu_tlbonly(tlb); in tlb_table_invalidate()
146 static void tlb_table_flush(struct mmu_gather *tlb) in tlb_table_flush() argument
148 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush()
151 tlb_table_invalidate(tlb); in tlb_table_flush()
157 void tlb_remove_table(struct mmu_gather *tlb, void *table) in tlb_remove_table() argument
159 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table()
164 tlb_table_invalidate(tlb); in tlb_remove_table()
173 tlb_table_flush(tlb); in tlb_remove_table()
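Note: page-table pages get their own batch (tlb->batch, lines 103-173). Before they may be freed, the TLB has to be invalidated (tlb_table_invalidate() calls tlb_flush_mmu_tlbonly()), and the kernel then defers the actual free (RCU or IPI based, depending on the architecture) so lockless page-table walkers never see a table vanish underneath them. A rough, illustrative model of the batching shape only, with made-up capacity and names:

#define TABLE_SLOTS 16

struct table_batch {
	unsigned int nr;
	void *tables[TABLE_SLOTS];
};

static void table_flush(struct table_batch *tb)
{
	if (tb->nr) {
		/* invalidate the TLB, then hand tables[0..nr-1] back */
		tb->nr = 0;
	}
}

static void remove_table(struct table_batch *tb, void *table)
{
	tb->tables[tb->nr++] = table;
	if (tb->nr == TABLE_SLOTS)
		table_flush(tb);
}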
178 static void tlb_flush_mmu_free(struct mmu_gather *tlb) in tlb_flush_mmu_free() argument
181 tlb_table_flush(tlb); in tlb_flush_mmu_free()
184 tlb_batch_pages_flush(tlb); in tlb_flush_mmu_free()
188 void tlb_flush_mmu(struct mmu_gather *tlb) in tlb_flush_mmu() argument
190 tlb_flush_mmu_tlbonly(tlb); in tlb_flush_mmu()
191 tlb_flush_mmu_free(tlb); in tlb_flush_mmu()
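Note: lines 190-191 carry the central ordering rule: invalidate the TLB for the gathered range first, and only then free what was queued; otherwise another CPU could still translate to, and touch, pages already handed back to the allocator. In terms of the model:

/* Ordering of tlb_flush_mmu(): invalidate first, free second. */
static void flush_mmu(struct gather *g)
{
	/* 1. the range TLB flush (tlb_flush_mmu_tlbonly) would run here */
	/* 2. only now is it safe to release the queued tables and pages */
	batch_pages_flush(g);
}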
206 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, in tlb_gather_mmu() argument
209 tlb->mm = mm; in tlb_gather_mmu()
212 tlb->fullmm = !(start | (end+1)); in tlb_gather_mmu()
215 tlb->need_flush_all = 0; in tlb_gather_mmu()
216 tlb->local.next = NULL; in tlb_gather_mmu()
217 tlb->local.nr = 0; in tlb_gather_mmu()
218 tlb->local.max = ARRAY_SIZE(tlb->__pages); in tlb_gather_mmu()
219 tlb->active = &tlb->local; in tlb_gather_mmu()
220 tlb->batch_count = 0; in tlb_gather_mmu()
224 tlb->batch = NULL; in tlb_gather_mmu()
227 tlb->page_size = 0; in tlb_gather_mmu()
230 __tlb_reset_range(tlb); in tlb_gather_mmu()
231 inc_tlb_flush_pending(tlb->mm); in tlb_gather_mmu()
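Note: tlb_gather_mmu() (lines 206-231) just resets the bookkeeping and publishes, via inc_tlb_flush_pending(), that a flush is in progress on this mm. Line 212 decides whether the gather is tearing down the whole address space: !(start | (end+1)) is true exactly for the range (0, ~0UL). A tiny standalone check of that expression (the ranges in main() are just examples):

#include <assert.h>
#include <stdio.h>

static int fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));	/* same expression as line 212 */
}

int main(void)
{
	assert(fullmm(0, ~0UL) == 1);		/* whole address space */
	assert(fullmm(0, 0x7fffffffUL) == 0);	/* partial unmap */
	assert(fullmm(0x1000, ~0UL) == 0);	/* non-zero start */
	printf("fullmm expression behaves as expected\n");
	return 0;
}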
243 void tlb_finish_mmu(struct mmu_gather *tlb, in tlb_finish_mmu() argument
258 if (mm_tlb_flush_nested(tlb->mm)) { in tlb_finish_mmu()
267 tlb->fullmm = 1; in tlb_finish_mmu()
268 __tlb_reset_range(tlb); in tlb_finish_mmu()
269 tlb->freed_tables = 1; in tlb_finish_mmu()
272 tlb_flush_mmu(tlb); in tlb_finish_mmu()
275 tlb_batch_list_free(tlb); in tlb_finish_mmu()
277 dec_tlb_flush_pending(tlb->mm); in tlb_finish_mmu()
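Note: tlb_finish_mmu() (lines 243-277) flushes whatever is still queued, frees the batch chain, and drops the pending count; the mm_tlb_flush_nested() branch (lines 258-269) widens to a full-mm flush with freed_tables set when another thread's concurrent unmap raced with this one, since the locally gathered range may understate what actually changed. Putting the pieces together, a typical caller in this kernel series looks roughly like the sketch below; kernel context is assumed, unmap_region_sketch is a made-up name, and the three-argument tlb_finish_mmu() matches the signatures gathered above:

static void unmap_region_sketch(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/*
	 * The page-table walk runs here: each freed page is handed to
	 * __tlb_remove_page_size(), each freed page-table page to
	 * tlb_remove_table(), and a full batch forces a tlb_flush_mmu().
	 */
	tlb_finish_mmu(&tlb, start, end);
}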