Lines matching full:tlb in the kernel's mmu_gather implementation (mm/mmu_gather.c). Only the matching lines were captured by the search, so each snippet below is fragmentary; "..." marks code elided by the search view.

#include <asm/tlb.h>
static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;      /* reuse an already-chained batch */
                return true;
        }
        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;   /* chain is capped; caller must flush instead */
        ...                     /* allocate a fresh single-page batch (GFP_NOWAIT) */
        tlb->batch_count++;
        ...
        tlb->active->next = batch;      /* chain the new batch and make it active */
        tlb->active = batch;
        return true;
}
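These helpers queue pages into single-page arrays. The layout they assume is struct mmu_gather_batch from include/asm-generic/tlb.h; paraphrased here from memory (the field comments are mine, not the kernel's):

struct mmu_gather_batch {
        struct mmu_gather_batch *next;  /* singly linked chain of batches */
        unsigned int            nr;     /* pages currently queued */
        unsigned int            max;    /* capacity of pages[] */
        struct page             *pages[];       /* flexible array filling the rest of the page */
};

Each batch occupies exactly one page, so its capacity is roughly (PAGE_SIZE - sizeof(header)) / sizeof(struct page *), and MAX_GATHER_BATCH_COUNT caps the chain so a single gather cannot pin an unbounded number of pages while they await freeing.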
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        /* Release every queued page (and its swap-cache entry), batch by batch. */
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                ...
        }
        tlb->active = &tlb->local;      /* restart batching from the embedded batch */
}
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch, *next;

        /* Free the dynamically allocated batch pages themselves. */
        for (batch = tlb->local.next; batch; batch = next) {
                ...
        }
        tlb->local.next = NULL;
}
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);   /* only valid inside a tracked flush range */
        ...
        VM_WARN_ON(tlb->page_size != page_size);        /* one page size per gather */
        ...
        batch = tlb->active;
        ...
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        ...
}
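The elided middle is the enqueue itself; in kernels of this vintage it reads approximately as below (reconstructed from memory, a sketch rather than verbatim source). A true return tells the caller the gather is out of batch memory and tlb_flush_mmu() must run before more pages are queued:

        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;    /* no memory for another batch: flush now */
                batch = tlb->active;
        }
        return false;                   /* page queued; no flush required yet */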
/*
 * Semi-RCU freeing of page directories: lockless page-table walkers such as
 * gup_fast() run with IRQs disabled. On architectures that flush TLBs with
 * IPIs this is already safe: since the disabling of IRQs delays the
 * completion of the TLB flush, such a walker can never observe an already
 * freed page table.
 */
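When tlb_remove_table() below cannot allocate its batch page, the file falls back to an IPI broadcast that serializes against exactly those IRQs-off walkers; roughly (reconstructed from memory of kernels in this range, a sketch):

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt; that alone fences IRQs-off walkers. */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * Not an RCU grace period, but it serializes against every walker
         * that runs with IRQs disabled, which is all that is needed here.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}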
/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
        ...
                /* Invalidate page-table caches used by hardware walkers. */
                tlb_flush_mmu_tlbonly(tlb);
        ...
}
static void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                ...     /* RCU-free the batch, then reset *batch to NULL */
        }
}
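The deferred free runs as an RCU callback that walks the batch and hands each table to the architecture; approximately (again a reconstruction, not verbatim):

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);   /* arch-specific table free */

        free_page((unsigned long)batch);
}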
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;
        ...
                tlb_table_invalidate(tlb);      /* batch allocation failed: free immediately */
        ...
                tlb_table_flush(tlb);           /* batch is full: flush it */
}
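Stitched together, the function allocates its batch page lazily, falls back to the IPI path above when that allocation fails, and flushes once the batch fills. The whole body, as a reconstruction rather than verbatim source:

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        /* No batch page: invalidate and free this one table now. */
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}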
static inline void tlb_table_init(struct mmu_gather *tlb)
{
        tlb->batch = NULL;
}
...     /* without RCU table freeing, the table hooks are no-op stubs: */
static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }
...
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        tlb_table_flush(tlb);           /* page tables first (possibly RCU-deferred) */
        ...
        tlb_batch_pages_flush(tlb);     /* then the queued data pages */
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);     /* invalidate TLB entries first... */
        tlb_flush_mmu_free(tlb);        /* ...and only then free the pages */
}
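tlb_flush_mmu_tlbonly() itself lives in include/asm-generic/tlb.h; its essential shape in this era, paraphrased as a sketch:

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        /* Nothing was cleared or freed: nothing to invalidate. */
        if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
              tlb->cleared_puds || tlb->cleared_p4ds))
                return;

        tlb_flush(tlb);         /* arch hook: invalidate tlb->start..tlb->end */
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
        __tlb_reset_range(tlb); /* begin tracking a fresh range */
}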
/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start, @end: the range being torn down (0 and -1 for a full-mm tear-down)
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                    unsigned long start, unsigned long end)
{
        tlb->mm = mm;

        /* start == 0 && end == -1 means the entire address space goes away. */
        tlb->fullmm = !(start | (end+1));
        ...
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
        tlb->active     = &tlb->local;
        tlb->batch_count = 0;
        ...
        tlb_table_init(tlb);
        ...
        tlb->page_size = 0;
        ...
        __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm); /* make racing PTE updaters aware of us */
}
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start, @end: the range that was torn down
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
                    unsigned long start, unsigned long end)
{
        /*
         * If parallel threads have been changing PTEs over the same range
         * under a non-exclusive lock (e.g., mmap_lock read-side) while
         * deferring their TLB flushes by batching, a thread may observe
         * inconsistent PTEs and be left with stale TLB entries, so flush
         * forcefully when parallel PTE batching is detected. Likewise,
         * syscalls such as munmap() may free page tables, which requires
         * flushing everything in the range: otherwise architectures such as
         * aarch64, whose invalidations can target a specific page-table
         * level, may keep stale TLB entries around.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
                ...
                tlb->fullmm = 1;        /* degrade to a full-mm flush... */
                __tlb_reset_range(tlb);
                tlb->freed_tables = 1;  /* ...that also assumes freed tables */
        }

        tlb_flush_mmu(tlb);
        ...
        tlb_batch_list_free(tlb);
        ...
        dec_tlb_flush_pending(tlb->mm);
}
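For orientation, a typical caller pairs these entry points around an unmap. Modeled loosely on unmap_region() in mm/mmap.c of the same era, with the boundary arguments simplified (illustrative, not verbatim):

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, start, end);   /* begin a gather over [start, end) */
unmap_vmas(&tlb, vma, start, end);      /* clear PTEs; pages queue via __tlb_remove_page_size() */
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);       /* flush the TLB, then free everything queued */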