Lines matching +full:half +full:- +full:bit (s390 page-table allocation, arch/s390/mm/pgalloc.c)
1 // SPDX-License-Identifier: GPL-2.0
38 return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM; in page_table_register_sysctl()
66 if (current->active_mm == mm) { in __crst_table_upgrade()
67 S390_lowcore.user_asce = mm->context.asce; in __crst_table_upgrade()
76 unsigned long asce_limit = mm->context.asce_limit; in crst_table_upgrade()
97 spin_lock_bh(&mm->page_table_lock); in crst_table_upgrade()
104 VM_BUG_ON(asce_limit != mm->context.asce_limit); in crst_table_upgrade()
107 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
109 mm->pgd = (pgd_t *) p4d; in crst_table_upgrade()
110 mm->context.asce_limit = _REGION1_SIZE; in crst_table_upgrade()
111 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
116 __pgd = (unsigned long *) mm->pgd; in crst_table_upgrade()
118 mm->pgd = (pgd_t *) pgd; in crst_table_upgrade()
119 mm->context.asce_limit = TASK_SIZE_MAX; in crst_table_upgrade()
120 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade()
124 spin_unlock_bh(&mm->page_table_lock); in crst_table_upgrade()
133 return -ENOMEM; in crst_table_upgrade()
166 * A 2KB-pgtable is either the upper or the lower half of a normal page.
167 * The second half of the page may be unused or used as another
168 * 2KB-pgtable.
170 * Whenever possible, the parent page for a new 2KB-pgtable is picked from the list of partially allocated pages, mm_context_t::pgtable_list.
175 * When a parent page gets fully allocated it contains 2KB-pgtables in both halves and is removed from mm_context_t::pgtable_list.
178 * When a 2KB-pgtable is freed from a fully allocated parent page, that page turns partially allocated and is added back to mm_context_t::pgtable_list.
181 * If a 2KB-pgtable is freed from a partially allocated parent page, that page turns unused, is removed from mm_context_t::pgtable_list, and is released.
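Taken together, these rules give each parent page one of three states; a minimal standalone sketch of them (hypothetical names, ignoring the pending-removal bits introduced next):

/* Hypothetical model of the parent page states implied above. A page sits
 * on mm_context_t::pgtable_list only while exactly one half is in use. */
enum parent_page_state {
	PP_UNUSED,	/* neither 2KB half allocated: page gets released  */
	PP_PARTIAL,	/* one half allocated: page is on pgtable_list     */
	PP_FULL,	/* both halves allocated: page is off pgtable_list */
};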
188 * The upper byte (bits 24-31) of the parent page _refcount is used
189 * for tracking contained 2KB-pgtables and has the following format:
192 * 01234567    upper byte (bits 24-31) of struct page::_refcount
194 *   ||  |+--- upper 2KB-pgtable is allocated
195 *   ||  +---- lower 2KB-pgtable is allocated
196 *   |+------- upper 2KB-pgtable is pending for removal
197 *   +-------- lower 2KB-pgtable is pending for removal
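Reading the masks used further down (0x01U and 0x10U shifted by bit + 24), the byte can be described with two helpers; a sketch with hypothetical names, where bit is 0 for the half at offset 0 and 1 for the second 2K:

/* Hypothetical helpers matching the masks used by the code below. */
#define AA_BIT(bit)	(0x01U << ((bit) + 24))	/* this half is allocated    */
#define PP_BIT(bit)	(0x10U << ((bit) + 24))	/* this half pending removal */

/* After "mask = _refcount >> 24", the AA pair is (mask & 0x03U) and the
 * PP pair is (mask & 0x30U). */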
202 * When a 2KB-pgtable is allocated the corresponding AA bit is set to 1. The parent page is then either:
204 * - added to mm_context_t::pgtable_list in case the second half of the parent page is still unallocated;
206 * - removed from mm_context_t::pgtable_list in case both halves of the parent page are allocated.
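The allocation-side bookkeeping fits in a few lines of plain C; an illustrative sketch (hypothetical names, no locking or atomics, whereas the real code does this under mm->context.lock):

#include <stdbool.h>

/* Set the AA bit for one half; returns true when both halves are now
 * allocated, i.e. when the caller must unlink the parent page from
 * mm_context_t::pgtable_list. */
static bool alloc_half(unsigned int *refcount, unsigned int bit)
{
	*refcount ^= 0x01U << (bit + 24);	/* AA bit was 0; XOR sets it */
	return ((*refcount >> 24) & 0x03U) == 0x03U;
}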
210 * When a 2KB-pgtable is deallocated the corresponding AA bit is set to 0
211 * and the corresponding PP bit is set to 1 in a single atomic operation.
212 * Thus, the PP and AA bits corresponding to the same 2KB-pgtable are mutually exclusive and may never both be set to 1. The parent page is then either:
215 * - added to mm_context_t::pgtable_list in case the second half of the parent page is still allocated;
217 * - removed from mm_context_t::pgtable_list in case the second half of the parent page is unallocated.
225 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
226 * while both AA bits and the second PP bit are already unset. Then the
227 * parent page does not contain any 2KB-pgtable fragment anymore, and it has no fragments pending for removal, so it can be freed.
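Condensed into the same model, the free path is two transitions on that byte; a single-threaded sketch (hypothetical names; the real code uses atomic_xor_bits() together with mm->context.lock):

#include <stdbool.h>

/* Step 1: AA 1->0 and PP 0->1 in one XOR of 0x11, mirroring
 * atomic_xor_bits(..., 0x11U << (bit + 24)) in the code below.
 * Step 2, once the table can no longer be reached: PP 1->0.
 * Returns true when the whole byte is clear and the page can be freed. */
static bool free_half(unsigned int *refcount, unsigned int bit)
{
	*refcount ^= 0x11U << (bit + 24);	/* mark pending, drop AA */
	/* ... the other half may be reused or freed in the meantime ... */
	*refcount ^= 0x10U << (bit + 24);	/* pending removal done  */
	return ((*refcount >> 24) & 0x33U) == 0;
}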
231 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
232 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable, whereas the PP bits are never used and such pages are never added to mm_context_t::pgtable_list.
240 * But for simplicity, because page->rcu_head overlays page->lru, and because
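The overlay mentioned on the line above (the sentence continues past the matched lines) comes from the unions inside struct page: the RCU callback head and the list linkage occupy the same bytes, so a page queued for RCU freeing cannot also sit on pgtable_list. A standalone illustration with simplified stand-in types, not the kernel's struct page:

struct list_head_model { struct list_head_model *next, *prev; };
struct rcu_head_model  { struct rcu_head_model *next;
			 void (*func)(struct rcu_head_model *); };

struct page_model {
	union {			/* one use at a time: list linkage xor RCU */
		struct list_head_model lru;
		struct rcu_head_model rcu_head;
	};
};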
249 unsigned int mask, bit; in page_table_alloc() local
254 spin_lock_bh(&mm->context.lock); in page_table_alloc()
255 if (!list_empty(&mm->context.pgtable_list)) { in page_table_alloc()
256 ptdesc = list_first_entry(&mm->context.pgtable_list, in page_table_alloc()
258 mask = atomic_read(&ptdesc->_refcount) >> 24; in page_table_alloc()
271 bit = mask & 1; /* =1 -> second 2K */ in page_table_alloc()
272 if (bit) in page_table_alloc()
274 atomic_xor_bits(&ptdesc->_refcount, in page_table_alloc()
275 0x01U << (bit + 24)); in page_table_alloc()
276 list_del_init(&ptdesc->pt_list); in page_table_alloc()
279 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
296 INIT_LIST_HEAD(&ptdesc->pt_list); in page_table_alloc()
297 atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in page_table_alloc()
302 atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24); in page_table_alloc()
304 spin_lock_bh(&mm->context.lock); in page_table_alloc()
305 list_add(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_alloc()
306 spin_unlock_bh(&mm->context.lock); in page_table_alloc()
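atomic_xor_bits(), used throughout these fragments, is a small helper defined earlier in this file; assuming its usual semantics (XOR the bits into the counter and return the resulting value), a C11 model looks like:

#include <stdatomic.h>

/* Model of atomic_xor_bits(): XOR @bits into *v and return the *new*
 * value, so callers can test the resulting AA/PP state in one step. */
static unsigned int atomic_xor_bits_model(_Atomic unsigned int *v,
					  unsigned int bits)
{
	return atomic_fetch_xor(v, bits) ^ bits;
}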
312 unsigned int half, unsigned int mask) in page_table_release_check() argument
318 if (!mask && list_empty(&page->lru)) in page_table_release_check()
321 "Invalid pgtable %p release half 0x%02x mask 0x%02x", in page_table_release_check()
322 table, half, mask); in page_table_release_check()
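The check above encodes the release invariant: by the time a fragment page is released, every AA/PP bit must be clear and the page must be off pgtable_list, otherwise the page is dumped. As a one-line model (hypothetical name):

#include <stdbool.h>

/* Valid release: no AA/PP bit left in mask, page unlinked from the list. */
static bool release_is_valid(unsigned int mask, bool still_listed)
{
	return mask == 0x00U && !still_listed;
}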
337 unsigned int mask, bit, half; in page_table_free() local
342 bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); in page_table_free()
343 spin_lock_bh(&mm->context.lock); in page_table_free()
349 mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); in page_table_free()
353 * Other half is allocated, and neither half has had its free deferred: in page_table_free()
355 * add the page to the head of the list, to make this freed half available for immediate reuse. in page_table_free()
357 list_add(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_free()
360 list_del_init(&ptdesc->pt_list); in page_table_free()
362 spin_unlock_bh(&mm->context.lock); in page_table_free()
363 mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24)); in page_table_free()
367 half = 0x01U << bit; in page_table_free()
369 half = 0x03U; in page_table_free()
370 mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in page_table_free()
374 page_table_release_check(ptdesc_page(ptdesc), table, half, mask); in page_table_free()
376 call_rcu(&ptdesc->pt_rcu_head, pte_free_now); in page_table_free()
378 pte_free_now(&ptdesc->pt_rcu_head); in page_table_free()
385 unsigned int bit, mask; in page_table_free_rcu() local
388 mm = tlb->mm; in page_table_free_rcu()
395 bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); in page_table_free_rcu()
396 spin_lock_bh(&mm->context.lock); in page_table_free_rcu()
402 mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); in page_table_free_rcu()
406 * Other half is allocated, and neither half has had its free deferred: in page_table_free_rcu()
408 * add the page to the end of the list, to make this freed half available for reuse in page_table_free_rcu()
409 * once its pending bit has been cleared by __tlb_remove_table(). in page_table_free_rcu()
411 list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list); in page_table_free_rcu()
414 list_del_init(&ptdesc->pt_list); in page_table_free_rcu()
416 spin_unlock_bh(&mm->context.lock); in page_table_free_rcu()
417 table = (unsigned long *) ((unsigned long) table | (0x01U << bit)); in page_table_free_rcu()
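The line above smuggles the half index into the pointer itself: each 2KB fragment is at least 2KB-aligned, so the low bits of its address are zero and can carry a tag (0x01 for the half at offset 0, 0x02 for the second 2K) until __tlb_remove_table() runs. A sketch of the encode side (hypothetical name):

#include <stdint.h>

/* Fragments are 2KB-aligned, so bits 0..10 of the address are free:
 * store "bit" (0 or 1) as an in-pointer tag, as the code above does. */
static void *tag_table(void *table, unsigned int bit)
{
	return (void *)((uintptr_t)table | (0x01U << bit));
}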
423 unsigned int mask = (unsigned long) _table & 0x03U, half = mask; in __tlb_remove_table() local
427 switch (half) { in __tlb_remove_table()
433 mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24)); in __tlb_remove_table()
439 mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); in __tlb_remove_table()
444 page_table_release_check(ptdesc_page(ptdesc), table, half, mask); in __tlb_remove_table()
446 call_rcu(&ptdesc->pt_rcu_head, pte_free_now); in __tlb_remove_table()
448 pte_free_now(&ptdesc->pt_rcu_head); in __tlb_remove_table()
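The decode side mirrors the encode sketch after page_table_free_rcu(): the low two bits select the case (0x01/0x02 for a 2KB half, 0x03 for a full 4KB pgtable with pgstes), and stripping them recovers the table address. Note how the atomic_xor_bits(..., mask << (4 + 24)) above reuses the tag: shifted by 4 + 24 it lands exactly on the matching PP bit. A sketch (hypothetical name):

#include <stdint.h>

/* Recover the tag and the aligned table address from a tagged pointer. */
static void *untag_table(void *_table, unsigned int *mask)
{
	*mask = (uintptr_t)_table & 0x03U;
	return (void *)((uintptr_t)_table & ~(uintptr_t)0x03U);
}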
514 unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1); \
516 return (next - 1) < (end - 1) ? next : end; \
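These two lines are the body of the generated *_addr_end() helpers: round addr up to the next SIZE boundary and clamp to end, where the "- 1" on both sides keeps the comparison correct even when next wraps to 0 at the top of the address space. A standalone copy with example values, assuming SIZE is a power of two:

#include <stdio.h>

/* Same logic as the macro body above; size must be a power of two. */
static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned long size)
{
	unsigned long next = (addr + size) & ~(size - 1);

	/* (next - 1) underflows to ULONG_MAX when next wraps to 0, so the
	 * clamp to end still holds at the very top of the address space. */
	return (next - 1) < (end - 1) ? next : end;
}

int main(void)
{
	printf("%#lx\n", addr_end(0x1234, 0x10000, 0x1000)); /* 0x2000 */
	printf("%#lx\n", addr_end(0x1234, 0x1800, 0x1000));  /* 0x1800: clamped */
	return 0;
}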
566 return -ENOMEM; in base_segment_walk()
595 return -ENOMEM; in base_region3_walk()
623 return -ENOMEM; in base_region2_walk()
651 return -ENOMEM; in base_region1_walk()
665 * base_asce_free - free asce and tables returned from base_asce_alloc()
705 return base_pgt_cache ? 0 : -ENOMEM; in base_pgt_cache_init()
709 * base_asce_alloc - create kernel mapping without enhanced DAT features