Lines Matching +full:page +full:- +full:based
1 // SPDX-License-Identifier: GPL-2.0-only
29 #include <asm/page.h>
37 /* Fields set based on lines observed in the console. */
63 return -1; in kasan_suite_init()
70 * Temporarily enable multi-shot mode. Otherwise, KASAN would only in kasan_suite_init()
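
The kasan_suite_init() fragments above hint at how the suite is set up: bail out when KASAN is disabled, then temporarily switch KASAN into multi-shot reporting. Below is a minimal, illustrative sketch of such a suite_init/suite_exit pair; the function and variable names are invented, and it assumes the multi-shot save/restore helpers this file relies on are reachable through <linux/kasan.h>.

/* Illustrative sketch only; not the file's exact code. */
#include <kunit/test.h>
#include <linux/kasan.h>
#include <linux/printk.h>

static bool example_multishot;

static int example_suite_init(struct kunit_suite *suite)
{
        if (!kasan_enabled()) {
                pr_err("Can't run KASAN tests with KASAN disabled");
                return -1;
        }

        /*
         * Temporarily enable multi-shot mode so that every bad access in the
         * suite gets reported, not only the first one.
         */
        example_multishot = kasan_save_enable_multi_shot();
        return 0;
}

static void example_suite_exit(struct kunit_suite *suite)
{
        /* Restore the previous multi-shot setting when the suite is done. */
        kasan_restore_multi_shot(example_multishot);
}
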
94 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
99 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
100 * checking is auto-disabled. When this happens, this test handler reenables
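
The doc comment above describes KUNIT_EXPECT_KASAN_FAIL(), the macro the whole file is built around: the wrapped expression must produce exactly one KASAN report. As a hedged illustration of how a test case uses it (the case name and size are made up, and the macro itself is defined locally in this file rather than by KUnit):

#include <kunit/test.h>
#include <linux/slab.h>

/* Hypothetical test case; KUNIT_EXPECT_KASAN_FAIL() comes from this file. */
static void example_oob_read(struct kunit *test)
{
        char *ptr;
        size_t size = 16;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* This access is one byte past the object and must be reported. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);

        kfree(ptr);
}
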
162 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in kmalloc_oob_right()
176 * An aligned access into the first out-of-bounds granule that falls in kmalloc_oob_right()
181 /* Out-of-bounds access past the aligned kmalloc object. */ in kmalloc_oob_right()
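
The kmalloc_oob_right() lines above size the object so that it does not end on a KASAN granule boundary and then probe the redzone both inside and past the first out-of-bounds granule. Roughly, and with hedging (the real test's exact checks and config guards may differ; KASAN_GRANULE_SIZE comes from KASAN's internal kasan.h header, which this file includes):

static void example_kmalloc_oob_right(struct kunit *test)
{
        char *ptr;
        size_t size = 128 - KASAN_GRANULE_SIZE - 5;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* Suppress -Warray-bounds warnings. */
        OPTIMIZER_HIDE_VAR(ptr);

        /*
         * An unaligned access just past the requested size; only generic
         * KASAN tracks object sizes with byte granularity.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

        /*
         * An aligned access into the first out-of-bounds granule that still
         * falls within the aligned kmalloc object.
         */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

        /* An out-of-bounds access past the aligned kmalloc object. */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + KASAN_GRANULE_SIZE + 5] = 'y');

        kfree(ptr);
}
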
197 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); in kmalloc_oob_left()
216 * fit into a slab cache and therefore is allocated via the page allocator
266 struct page *pages; in pagealloc_oob_right()
271 * With generic KASAN, page allocations have no redzones, thus in pagealloc_oob_right()
272 * out-of-bounds detection is not guaranteed. in pagealloc_oob_right()
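
The pagealloc_oob_right() fragments note that generic KASAN adds no redzones to page allocations, so an access just past a page-allocator object is not guaranteed to be caught there. Here is a sketch of how a test can honour that caveat; the skip logic uses plain kunit_skip() rather than whatever helper the real file uses, and the names are invented:

#include <linux/gfp.h>
#include <linux/mm.h>

static void example_pagealloc_oob_right(struct kunit *test)
{
        char *ptr;
        struct page *pages;
        unsigned int order = 4;
        size_t size = 1UL << (PAGE_SHIFT + order);

        /*
         * With generic KASAN, page allocations have no redzones, so the
         * out-of-bounds write below is not guaranteed to be detected.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kunit_skip(test, "generic KASAN does not redzone page allocations");

        pages = alloc_pages(GFP_KERNEL, order);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
        ptr = page_address(pages);

        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

        free_pages((unsigned long)ptr, order);
}
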
288 struct page *pages; in pagealloc_uaf()
302 size_t size = KMALLOC_MAX_CACHE_SIZE - 256; in kmalloc_large_oob_right()
306 * and does not trigger the page allocator fallback in SLUB. in kmalloc_large_oob_right()
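
The kmalloc_large_oob_right() fragment picks KMALLOC_MAX_CACHE_SIZE - 256 deliberately: the allocation still fits into the largest kmalloc slab cache instead of taking SLUB's page-allocator fallback, which other tests cover. A minimal sketch of that idea (names invented):

static void example_kmalloc_large_oob_right(struct kunit *test)
{
        char *ptr;
        size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

        /* Still served from a slab cache, not the page allocator fallback. */
        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        OPTIMIZER_HIDE_VAR(ptr);
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);

        kfree(ptr);
}
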
323 middle = size1 + (size2 - size1) / 2; in krealloc_more_oob_helper()
331 /* Suppress -Warray-bounds warnings. */ in krealloc_more_oob_helper()
335 ptr2[size1 - 1] = 'x'; in krealloc_more_oob_helper()
338 ptr2[size2 - 1] = 'x'; in krealloc_more_oob_helper()
358 middle = size2 + (size1 - size2) / 2; in krealloc_less_oob_helper()
366 /* Suppress -Warray-bounds warnings. */ in krealloc_less_oob_helper()
370 ptr2[size2 - 1] = 'x'; in krealloc_less_oob_helper()
389 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); in krealloc_less_oob_helper()
424 * Check that krealloc() detects a use-after-free, returns NULL,
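
The comment above summarizes the krealloc() use-after-free check: reallocating a freed pointer must be reported, must return NULL, and the original object must stay inaccessible. A hedged sketch (sizes and names invented):

static void example_krealloc_uaf(struct kunit *test)
{
        char *ptr1, *ptr2;
        size_t size1 = 200;
        size_t size2 = 300;

        ptr1 = kmalloc(size1, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
        kfree(ptr1);

        /* krealloc() on a freed pointer must be reported and must fail. */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
        KUNIT_ASSERT_NULL(test, ptr2);

        /* The freed object itself must remain inaccessible. */
        KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
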
454 ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0); in kmalloc_oob_16()
495 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_2()
503 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2)); in kmalloc_oob_memset_2()
510 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_4()
518 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4)); in kmalloc_oob_memset_4()
525 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_8()
533 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8)); in kmalloc_oob_memset_8()
540 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_16()
548 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16)); in kmalloc_oob_memset_16()
555 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_in_memset()
573 size_t invalid_size = -2; in kmalloc_memmove_negative_size()
578 * Hardware tag-based mode doesn't check memmove for negative size. in kmalloc_memmove_negative_size()
579 * As a result, this test introduces a side-effect memory corruption, in kmalloc_memmove_negative_size()
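
The kmalloc_memmove_negative_size() fragments explain why the check is restricted: the hardware tag-based mode does not sanity-check memmove() for a negative (i.e. huge) size, so running it there would only corrupt memory. A sketch of the check, using kunit_skip() in place of the file's own config helper:

#include <linux/string.h>

static void example_memmove_negative_size(struct kunit *test)
{
        char *ptr;
        size_t size = 64;
        size_t invalid_size = -2;

        /* Skip where the bad size would only cause silent corruption. */
        if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))
                kunit_skip(test, "memmove size check needs a software KASAN mode");

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        memset(ptr, 0, size);
        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(invalid_size);

        /* invalid_size wraps to a huge value; KASAN must flag the copy. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                memmove(ptr, ptr + 4, invalid_size));

        kfree(ptr);
}
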
662 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. in kmalloc_uaf2()
677 * Check that KASAN detects use-after-free when another object was allocated in
678 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
685 /* This test is specifically crafted for tag-based modes. */ in kmalloc_uaf3()
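
The kmalloc_uaf3() lines above target the tag-based modes, which have no quarantine: even when a freed slot is quickly reused by a new allocation, the stale pointer keeps its old tag and a later access through it must still be reported. A rough, hedged sketch:

static void example_uaf_reused_slot(struct kunit *test)
{
        char *ptr1, *ptr2;
        size_t size = 128;

        ptr1 = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
        kfree(ptr1);

        /*
         * The second allocation is likely to reuse the same slot but gets a
         * fresh tag under the tag-based modes, and the slot is repoisoned
         * once it is freed again, so the stale ptr1 access is still caught.
         */
        ptr2 = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
        kfree(ptr2);

        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[0]);
}
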
703 struct page *page; in kfree_via_page() local
709 page = virt_to_page(ptr); in kfree_via_page()
711 kfree(page_address(page) + offset); in kfree_via_page()
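
kfree_via_page() frees an object through an address rebuilt from its struct page plus the offset within that page, checking that KASAN handles this alias exactly like a plain kfree(). A minimal sketch (names invented):

#include <linux/mm.h>

static void example_kfree_via_page(struct kunit *test)
{
        char *ptr;
        size_t size = 8;
        struct page *page;
        unsigned long offset;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* Rebuild the object address from its page and in-page offset. */
        page = virt_to_page(ptr);
        offset = offset_in_page(ptr);

        /* Freeing through the alias must not confuse KASAN. */
        kfree(page_address(page) + offset);
}
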
795 p[i][0] = p[i][size - 1] = 42; in kmem_cache_bulk()
806 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS in kasan_global_oob_right()
829 char *p = array - 3; in kasan_global_oob_left()
844 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in ksize_unpoisons_memory()
857 ptr[size - 1] = 'x'; in ksize_unpoisons_memory()
863 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); in ksize_unpoisons_memory()
869 * Check that a use-after-free is detected by ksize() and via normal accesses
875 int size = 128 - KASAN_GRANULE_SIZE; in ksize_uaf()
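
The ksize_uaf() fragments say that a use-after-free must be caught both when the freed pointer is handed to ksize() and when it is dereferenced directly. A hedged sketch of that double check:

static void example_ksize_uaf(struct kunit *test)
{
        char *ptr;
        size_t size = 128 - KASAN_GRANULE_SIZE;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        kfree(ptr);

        OPTIMIZER_HIDE_VAR(ptr);

        /* ksize() touches the object, so it must trigger a report too. */
        KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));

        /* So must an ordinary access through the stale pointer. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
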
905 char *p = array - 1; in kasan_alloca_oob_left()
1124 * below accesses are still out-of-bounds, since bitops are defined to in kasan_bitops_generic()
1141 /* This test is specifically crafted for tag-based modes. */ in kasan_bitops_tags()
1144 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */ in kasan_bitops_tags()
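
The kasan_bitops_tags() fragments rely on slab sizing: a 48-byte allocation lands in the kmalloc-64 cache, so under the tag-based modes the last 16 bytes of the slot act as a redzone that bitops must not be allowed to touch. A hedged sketch with a single bitop (the real test exercises a whole set of them and uses its own config helper instead of kunit_skip()):

#include <linux/bitops.h>

static void example_bitops_redzone(struct kunit *test)
{
        long *bits;

        /* This variant is only meaningful for the tag-based modes. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kunit_skip(test, "crafted for the tag-based modes");

        /* 48 bytes come from the kmalloc-64 cache; bytes 48-63 are redzone. */
        bits = kzalloc(48, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

        /* A bitop on the redzone right past the object must be reported. */
        KUNIT_EXPECT_KASAN_FAIL(test, set_bit(0, (void *)bits + 48));

        kfree(bits);
}
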
1186 ((volatile struct kasan_rcu_info *)fp)->i; in rcu_uaf_reclaim()
1200 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); in rcu_uaf()
1225 ((volatile struct work_struct *)work)->data); in workqueue_uaf()
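
workqueue_uaf() checks that a use-after-free is caught even when the object is freed asynchronously: the work item frees itself, the workqueue is flushed, and the stale pointer is then read. A hedged sketch of that flow (names invented; the real test's setup may differ):

#include <linux/workqueue.h>

static void example_wq_uaf_work(struct work_struct *work)
{
        /* The work item frees itself; any later access to it is a UAF. */
        kfree(work);
}

static void example_workqueue_uaf(struct kunit *test)
{
        struct workqueue_struct *wq;
        struct work_struct *work;

        wq = create_workqueue("example_kasan_wq");
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wq);

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

        INIT_WORK(work, example_wq_uaf_work);
        queue_work(wq, work);

        /* destroy_workqueue() flushes the work, so it has freed itself here. */
        destroy_workqueue(wq);

        /* Reading the freed work item must produce a report. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                        ((volatile struct work_struct *)work)->data);
}
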
1232 /* This test is intended for tag-based modes. */ in vmalloc_helpers_tags()
1266 struct page *page; in vmalloc_oob() local
1267 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5; in vmalloc_oob()
1277 * We have to be careful not to hit the guard page in vmalloc tests. in vmalloc_oob()
1281 /* Make sure in-bounds accesses are valid. */ in vmalloc_oob()
1283 v_ptr[size - 1] = 0; in vmalloc_oob()
1292 /* An aligned access into the first out-of-bounds granule. */ in vmalloc_oob()
1295 /* Check that in-bounds accesses to the physical page are valid. */ in vmalloc_oob()
1296 page = vmalloc_to_page(v_ptr); in vmalloc_oob()
1297 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vmalloc_oob()
1298 p_ptr = page_address(page); in vmalloc_oob()
1305 * We can't check for use-after-unmap bugs in this nor in the following in vmalloc_oob()
1306 * vmalloc tests, as the page might be fully unmapped and accessing it in vmalloc_oob()
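
The vmalloc_oob() lines show two constraints at once: the object is kept well below PAGE_SIZE so the out-of-bounds probes never reach the vmalloc guard page (which would crash rather than report), and use-after-unmap cannot be checked because the area may be fully unmapped after vfree(). A hedged sketch of the in-bounds and out-of-bounds part, with kunit_skip() standing in for the file's config helper:

#include <linux/vmalloc.h>

static void example_vmalloc_oob(struct kunit *test)
{
        char *v_ptr;
        size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

        if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kunit_skip(test, "needs CONFIG_KASAN_VMALLOC");

        v_ptr = vmalloc(size);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

        OPTIMIZER_HIDE_VAR(v_ptr);

        /* Make sure in-bounds accesses stay silent. */
        v_ptr[0] = 0;
        v_ptr[size - 1] = 0;

        /*
         * The probes below stay far from the end of the mapped page, so they
         * hit the poisoned area rather than the vmalloc guard page.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

        /* No use-after-unmap check: the area may be gone after vfree(). */
        vfree(v_ptr);
}
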
1314 struct page *p_page, *v_page; in vmap_tags()
1317 * This test is specifically crafted for the software tag-based mode, in vmap_tags()
1318 * the only tag-based mode that poisons vmap mappings. in vmap_tags()
1333 * We can't check for out-of-bounds bugs in this nor in the following in vmap_tags()
1334 * vmalloc tests, as allocations have page granularity and accessing in vmap_tags()
1335 * the guard page will crash the kernel. in vmap_tags()
1341 /* Make sure that in-bounds accesses through both pointers work. */ in vmap_tags()
1345 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */ in vmap_tags()
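
vmap_tags() maps an already-allocated page through vmap() and then checks that both aliases are usable and that vmalloc_to_page() recovers the original struct page; the tag-range assertions of the real test are omitted here because they rely on KASAN-internal helpers. A hedged sketch of the round trip:

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void example_vmap_roundtrip(struct kunit *test)
{
        char *p_ptr, *v_ptr;
        struct page *p_page, *v_page;

        p_page = alloc_pages(GFP_KERNEL, 1);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
        p_ptr = page_address(p_page);

        v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

        /* In-bounds accesses through both the linear and the vmap alias. */
        *p_ptr = 0;
        *v_ptr = 0;

        /* vmalloc_to_page() must recover the original struct page. */
        v_page = vmalloc_to_page(v_ptr);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
        KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

        vunmap(v_ptr);
        free_pages((unsigned long)p_ptr, 1);
}
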
1357 struct page *page; in vm_map_ram_tags() local
1360 * This test is specifically crafted for the software tag-based mode, in vm_map_ram_tags()
1361 * the only tag-based mode that poisons vm_map_ram mappings. in vm_map_ram_tags()
1365 page = alloc_pages(GFP_KERNEL, 1); in vm_map_ram_tags()
1366 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vm_map_ram_tags()
1367 p_ptr = page_address(page); in vm_map_ram_tags()
1370 v_ptr = vm_map_ram(&page, 1, -1); in vm_map_ram_tags()
1376 /* Make sure that in-bounds accesses through both pointers work. */ in vm_map_ram_tags()
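
vm_map_ram_tags() does the same kind of round trip through the vm_map_ram() interface. A minimal hedged sketch, again leaving out the tag assertions:

static void example_vm_map_ram_roundtrip(struct kunit *test)
{
        char *p_ptr, *v_ptr;
        struct page *page;

        page = alloc_pages(GFP_KERNEL, 1);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
        p_ptr = page_address(page);

        /* Map a single page on any NUMA node (-1). */
        v_ptr = vm_map_ram(&page, 1, -1);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

        /* In-bounds accesses through both aliases must work. */
        *p_ptr = 0;
        *v_ptr = 0;

        vm_unmap_ram(v_ptr, 1);
        free_pages((unsigned long)p_ptr, 1);
}
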
1386 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1392 struct page *pages; in match_all_not_assigned()
1429 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1455 /* Check that there are no match-all memory tags for tag-based modes. */