
Lines Matching +full:half +full:- +full:bit

1 // SPDX-License-Identifier: GPL-2.0
38 return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM; in page_table_register_sysctl()
64 if (current->active_mm == mm) { in __crst_table_upgrade()
65 S390_lowcore.user_asce = mm->context.asce; in __crst_table_upgrade()
74 unsigned long asce_limit = mm->context.asce_limit; in crst_table_upgrade()
95 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
102 VM_BUG_ON(asce_limit != mm->context.asce_limit); in crst_table_upgrade()
105 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
107 mm->pgd = (pgd_t *) p4d; in crst_table_upgrade()
108 mm->context.asce_limit = _REGION1_SIZE; in crst_table_upgrade()
109 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
114 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
116 mm->pgd = (pgd_t *) pgd; in crst_table_upgrade()
117 mm->context.asce_limit = TASK_SIZE_MAX; in crst_table_upgrade()
118 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
122 spin_unlock_bh(&mm->page_table_lock); in crst_table_upgrade()
131 return -ENOMEM; in crst_table_upgrade()
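The matched lines above show crst_table_upgrade() growing the user address space in up to two steps: first to _REGION1_SIZE (four table levels), then to TASK_SIZE_MAX (five levels), each time chaining the old top-level table under a freshly allocated one. A minimal user-space model of that chaining, with hypothetical names and the s390 region sizes assumed from the constants above:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 2048	/* entries per s390 region table (assumption) */

struct mm_model {
	void *top;			/* stands in for mm->pgd */
	unsigned long asce_limit;	/* stands in for mm->context.asce_limit */
};

/* Allocate a new top-level table; the old top becomes its first entry,
 * since the existing tables cover the low part of the address space. */
static void upgrade_one_level(struct mm_model *mm, unsigned long new_limit)
{
	void **table = calloc(ENTRIES, sizeof(void *));

	if (!table)
		exit(EXIT_FAILURE);
	table[0] = mm->top;
	mm->top = table;
	mm->asce_limit = new_limit;
}

int main(void)
{
	/* three-level layout: limit at _REGION2_SIZE (2^42 on s390) */
	struct mm_model mm = { .top = NULL, .asce_limit = 1UL << 42 };

	upgrade_one_level(&mm, 1UL << 53);	/* like _REGION1_SIZE */
	upgrade_one_level(&mm, ~0UL);		/* like TASK_SIZE_MAX */
	printf("asce_limit now 0x%lx\n", mm.asce_limit);
	return 0;
}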
164 * A 2KB-pgtable is either upper or lower half of a normal page.
165 * The second half of the page may be unused or used as another
166 * 2KB-pgtable.
168 * Whenever possible the parent page for a new 2KB-pgtable is picked
173 * When a parent page gets fully allocated it contains 2KB-pgtables in both
176 * When a 2KB-pgtable is freed from the fully allocated parent page that
179 * If a 2KB-pgtable is freed from the partially allocated parent page that
186 * The upper byte (bits 24-31) of the parent page _refcount is used
187 * for tracking contained 2KB-pgtables and has the following format:
190 * 01234567    upper byte (bits 24-31) of struct page::_refcount
192 *   ||  |+--- upper 2KB-pgtable is allocated
193 *   ||  +---- lower 2KB-pgtable is allocated
194 *   |+------- upper 2KB-pgtable is pending for removal
195 *   +-------- lower 2KB-pgtable is pending for removal
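Read together with the masks used further down in the listing (0x01U/0x02U for AA, 0x10U/0x20U for PP, all shifted into bits 24-31), the tracking byte can be modelled in a few lines of user-space C. The macro names here are illustrative, not the kernel's:

#include <assert.h>

#define AA(bit)	(0x01U << ((bit) + 24))	/* 2K half "bit" is allocated */
#define PP(bit)	(0x10U << ((bit) + 24))	/* 2K half "bit" pending removal */

int main(void)
{
	unsigned int refcount = 0;

	refcount ^= AA(0);			/* allocate the first half */
	refcount ^= AA(1);			/* allocate the second half */
	assert((refcount >> 24) == 0x03U);	/* page fully allocated */

	refcount ^= AA(1) | PP(1);		/* free second half: AA->0, PP->1 */
	assert((refcount >> 24) == 0x21U);	/* 0x01 allocated + 0x20 pending */
	return 0;
}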
200 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1.
202 * - added to mm_context_t::pgtable_list in case the second half of the
204 * - removed from mm_context_t::pgtable_list in case both halves of the
208 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
209 * and the corresponding PP bit is set to 1 in a single atomic operation.
210 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
213 * - added to mm_context_t::pgtable_list in case the second half of the
215 * - removed from mm_context_t::pgtable_list in case the second half of
223 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
224 * while both AA bits and the second PP bit are already unset. Then the
225 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
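The two atomic steps described above can be traced on a single half (bit = 0) of an otherwise idle page; this is exactly the pair of XORs visible in page_table_free() below (0x11U, then 0x10U). A sketch, assuming no concurrent users:

#include <stdio.h>

int main(void)
{
	unsigned int refcount = 0x01U << 24;	/* AA set: half is allocated */

	/* one XOR clears AA and sets PP together; this is only correct
	 * because the protocol guarantees AA == 1 and PP == 0 here */
	refcount ^= 0x11U << 24;
	printf("pending:  0x%02x\n", refcount >> 24);	/* prints 0x10 */

	/* the second step clears PP; a byte of 0x00 means the parent
	 * page has no user left and may be released */
	refcount ^= 0x10U << 24;
	printf("released: 0x%02x\n", refcount >> 24);	/* prints 0x00 */
	return 0;
}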
229 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
230 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
238 * But for simplicity, because page->rcu_head overlays page->lru, and because
247 unsigned int mask, bit; in page_table_alloc() local
252 spin_lock_bh(&mm->context.lock); in page_table_alloc()
253 if (!list_empty(&mm->context.pgtable_list)) { in page_table_alloc()
254 ptdesc = list_first_entry(&mm->context.pgtable_list, in page_table_alloc()
256 mask = atomic_read(&ptdesc->_refcount) >> 24; in page_table_alloc()
269 bit = mask & 1; /* =1 -> second 2K */ in page_table_alloc()
270 if (bit) in page_table_alloc()
272 atomic_xor_bits(&ptdesc->_refcount, in page_table_alloc()
273 0x01U << (bit + 24)); in page_table_alloc()
274 list_del_init(&ptdesc->pt_list); in page_table_alloc()
277 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
294 INIT_LIST_HEAD(&ptdesc->pt_list); in page_table_alloc()
295 atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in page_table_alloc()
300 atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24); in page_table_alloc()
302 spin_lock_bh(&mm->context.lock); in page_table_alloc()
303 list_add(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_alloc()
304 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
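In the allocation path above, the full function (between the matched lines) folds the PP nibble onto the AA nibble with mask = (mask | (mask >> 4)) & 0x03U, so a half that is still pending removal counts as busy; bit = mask & 1 then picks the other half. A self-contained sketch of that selection:

#include <assert.h>

static unsigned int pick_free_half(unsigned int refcount)
{
	unsigned int mask = refcount >> 24;

	mask = (mask | (mask >> 4)) & 0x03U;	/* busy = allocated or pending */
	assert(mask != 0x03U);	/* list only holds partially used pages */
	return mask & 1;	/* =1 -> first half busy, hand out second 2K */
}

int main(void)
{
	assert(pick_free_half(0x01U << 24) == 1);	/* first allocated */
	assert(pick_free_half(0x02U << 24) == 0);	/* second allocated */
	assert(pick_free_half(0x20U << 24) == 0);	/* second still pending */
	return 0;
}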
310 unsigned int half, unsigned int mask) in page_table_release_check() argument
316 if (!mask && list_empty(&page->lru)) in page_table_release_check()
319 "Invalid pgtable %p release half 0x%02x mask 0x%02x", in page_table_release_check()
320 table, half, mask); in page_table_release_check()
335 unsigned int mask, bit, half; in page_table_free() local
340 bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); in page_table_free()
341 spin_lock_bh(&mm->context.lock); in page_table_free()
347 mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); in page_table_free()
351 * Other half is allocated, and neither half has had in page_table_free()
353 * this freed half available for immediate reuse. in page_table_free()
355 list_add(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_free()
358 list_del_init(&ptdesc->pt_list); in page_table_free()
360 spin_unlock_bh(&mm->context.lock); in page_table_free()
361 mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24)); in page_table_free()
365 half = 0x01U << bit; in page_table_free()
367 half = 0x03U; in page_table_free()
368 mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in page_table_free()
372 page_table_release_check(ptdesc_page(ptdesc), table, half, mask); in page_table_free()
374 call_rcu(&ptdesc->pt_rcu_head, pte_free_now); in page_table_free()
376 pte_free_now(&ptdesc->pt_rcu_head); in page_table_free()
383 unsigned int bit, mask; in page_table_free_rcu() local
386 mm = tlb->mm; in page_table_free_rcu()
393 bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); in page_table_free_rcu()
394 spin_lock_bh(&mm->context.lock); in page_table_free_rcu()
400 mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); in page_table_free_rcu()
404 * Other half is allocated, and neither half has had in page_table_free_rcu()
406 * this freed half available for reuse once its pending in page_table_free_rcu()
407 * bit has been cleared by __tlb_remove_table(). in page_table_free_rcu()
409 list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_free_rcu()
412 list_del_init(&ptdesc->pt_list); in page_table_free_rcu()
414 spin_unlock_bh(&mm->context.lock); in page_table_free_rcu()
415 table = (unsigned long *) ((unsigned long) table | (0x01U << bit)); in page_table_free_rcu()
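The last matched line of page_table_free_rcu() tags the freed half into the two low bits of the (2KB-aligned) table address before handing it to the TLB code; __tlb_remove_table() below recovers both pieces. A round-trip sketch with a made-up address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t table = 0x10800;	/* hypothetical 2KB-aligned address */
	unsigned int bit = 1;		/* the second 2K half is being freed */

	/* encode, as in page_table_free_rcu() */
	uintptr_t tagged = table | (0x01U << bit);

	/* decode, as at the top of __tlb_remove_table() */
	unsigned int half = tagged & 0x03U;
	uintptr_t orig = tagged ^ half;

	assert(half == 0x02U && orig == table);
	return 0;
}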
421 unsigned int mask = (unsigned long) _table & 0x03U, half = mask; in __tlb_remove_table() local
425 switch (half) { in __tlb_remove_table()
431 mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24)); in __tlb_remove_table()
437 mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in __tlb_remove_table()
442 page_table_release_check(ptdesc_page(ptdesc), table, half, mask); in __tlb_remove_table()
444 call_rcu(&ptdesc->pt_rcu_head, pte_free_now); in __tlb_remove_table()
446 pte_free_now(&ptdesc->pt_rcu_head); in __tlb_remove_table()
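For the 2K cases, the XOR of mask << (4 + 24) above clears the PP bit that page_table_free_rcu() set; the parent page is released only if the whole tracking byte drops to zero. A compact model of that final test:

#include <stdbool.h>

/* half is 0x01U or 0x02U, taken from the tagged pointer's low bits */
static bool last_user_gone(unsigned int *refcount, unsigned int half)
{
	*refcount ^= half << (4 + 24);		/* clear this half's PP bit */
	return (*refcount >> 24) == 0x00U;	/* no AA/PP bits left at all */
}

int main(void)
{
	unsigned int refcount = 0x10U << 24;	/* only our PP bit remains */

	return last_user_gone(&refcount, 0x01U) ? 0 : 1;	/* releases */
}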
510 unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1); \
512 return (next - 1) < (end - 1) ? next : end; \
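These two lines implement the base_*_addr_end() helpers: round addr up to the next SIZE-aligned boundary without ever passing end. Comparing next - 1 against end - 1 keeps the test correct when end is 0, i.e. when the range runs to the very top of the address space. A worked example with 1MB steps:

#include <assert.h>

static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned long size)
{
	unsigned long next = (addr + size) & ~(size - 1);

	return (next - 1) < (end - 1) ? next : end;
}

int main(void)
{
	/* 0x180000 rounds up to 0x200000; the last step clamps at end */
	assert(addr_end(0x180000, 0x250000, 0x100000) == 0x200000);
	assert(addr_end(0x200000, 0x250000, 0x100000) == 0x250000);
	/* end == 0 wraps: end - 1 is ULONG_MAX, so no clamping occurs */
	assert(addr_end(0x180000, 0x000000, 0x100000) == 0x200000);
	return 0;
}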
562 return -ENOMEM; in base_segment_walk()
591 return -ENOMEM; in base_region3_walk()
619 return -ENOMEM; in base_region2_walk()
647 return -ENOMEM; in base_region1_walk()
661 * base_asce_free - free asce and tables returned from base_asce_alloc()
701 return base_pgt_cache ? 0 : -ENOMEM; in base_pgt_cache_init()
705 * base_asce_alloc - create kernel mapping without enhanced DAT features
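Taken together, the two kernel-doc lines describe a matched pair: base_asce_alloc(addr, num_pages) builds a table hierarchy and returns an ASCE (or 0 on failure), and base_asce_free() tears it down again. A hypothetical kernel-context caller (not runnable outside the kernel; use_base_asce() is an invented name):

static int use_base_asce(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce;

	asce = base_asce_alloc(addr, num_pages);
	if (!asce)
		return -ENOMEM;		/* table allocation failed */
	/* ... load asce into a control register and perform the access ... */
	base_asce_free(asce);
	return 0;
}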