/external/igt-gpu-tools/tests/i915/
D | gem_mmap_wc.c |
    52  static int OBJECT_SIZE = 16*1024*1024;   (variable)
    63  ptr = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in local_gem_mmap__wc()
    79  ptr = local_gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in mmap_bo()
    90  handle = gem_create(fd, OBJECT_SIZE);   in create_pointer()
    148 memcpy(dst, src, OBJECT_SIZE);   in test_copy()
    149 memcpy(src, dst, OBJECT_SIZE);   in test_copy()
    151 munmap(dst, OBJECT_SIZE);   in test_copy()
    152 munmap(src, OBJECT_SIZE);   in test_copy()
    167 handle = gem_create(fd, OBJECT_SIZE);   in test_read_write()
    182 munmap(ptr, OBJECT_SIZE);   in test_read_write()
    [all …]

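The hits above all follow the same create/map/copy shape. Below is a minimal sketch of that pattern, assuming the igt helpers that appear in the excerpts (gem_create, gem_mmap__wc, munmap) plus igt's gem_close and igt_assert_eq_u32; the function name wc_roundtrip and the fill pattern are illustrative, not taken from the test.

#include "igt.h"
#include <sys/mman.h>

/* Hypothetical helper: create a GEM object, map it write-combined, write and
 * read back through the mapping, then clean up. Mirrors the hits above. */
static void wc_roundtrip(int fd)
{
	const size_t sz = 16*1024*1024;       /* matches OBJECT_SIZE above */
	uint32_t handle = gem_create(fd, sz);
	uint32_t *ptr = gem_mmap__wc(fd, handle, 0, sz, PROT_READ | PROT_WRITE);

	for (size_t i = 0; i < sz / sizeof(*ptr); i++)
		ptr[i] = i;                   /* write through the WC mapping */

	igt_assert_eq_u32(ptr[1], 1);         /* read back through the same mapping */

	munmap(ptr, sz);
	gem_close(fd, handle);
}
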
D | gem_set_tiling_vs_pwrite.c |
    42  #define OBJECT_SIZE (1024*1024)   (macro)
    53  uint32_t data[OBJECT_SIZE/4];
    61  for (i = 0; i < OBJECT_SIZE/4; i++)
    64  handle = gem_create(fd, OBJECT_SIZE);
    65  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
    74  gem_write(fd, handle, 0, data, OBJECT_SIZE);
    75  memset(data, 0, OBJECT_SIZE);
    76  gem_read(fd, handle, 0, data, OBJECT_SIZE);
    77  for (i = 0; i < OBJECT_SIZE/4; i++)
    87  gem_write(fd, handle, 0, data, OBJECT_SIZE);
    [all …]

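The gem_write/gem_read lines above are a pwrite/pread round trip on the object. A minimal sketch of that round trip, using only the helpers visible in the excerpt plus gem_close and igt_assert_eq_u32; the function name pwrite_pread_roundtrip is illustrative.

#include "igt.h"
#include <string.h>

#define OBJECT_SIZE (1024*1024)   /* as in the test above */

/* Hypothetical helper: pwrite a pattern into an object, pread it back and
 * verify it, i.e. the round trip the hits above are built around. */
static void pwrite_pread_roundtrip(int fd)
{
	static uint32_t data[OBJECT_SIZE/4], back[OBJECT_SIZE/4];
	uint32_t handle = gem_create(fd, OBJECT_SIZE);

	for (unsigned i = 0; i < OBJECT_SIZE/4; i++)
		data[i] = i;

	gem_write(fd, handle, 0, data, OBJECT_SIZE);   /* CPU buffer -> object */
	memset(back, 0, sizeof(back));
	gem_read(fd, handle, 0, back, OBJECT_SIZE);    /* object -> CPU buffer */

	for (unsigned i = 0; i < OBJECT_SIZE/4; i++)
		igt_assert_eq_u32(back[i], i);

	gem_close(fd, handle);
}
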
D | i915_suspend.c |
    44  #define OBJECT_SIZE (16*1024*1024)   (macro)
    55  handle1 = gem_create(fd, OBJECT_SIZE);   in test_fence_restore()
    56  handle2 = gem_create(fd, OBJECT_SIZE);   in test_fence_restore()
    57  handle_tiled = gem_create(fd, OBJECT_SIZE);   in test_fence_restore()
    60  ptr1 = gem_mmap__gtt(fd, handle1, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in test_fence_restore()
    62  for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)   in test_fence_restore()
    65  ptr_tiled = gem_mmap__gtt(fd, handle_tiled, OBJECT_SIZE,   in test_fence_restore()
    71  for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)   in test_fence_restore()
    74  ptr2 = gem_mmap__gtt(fd, handle2, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in test_fence_restore()
    76  for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)   in test_fence_restore()
    [all …]

D | gem_mmap_gtt.c |
    50  static int OBJECT_SIZE = 16*1024*1024;   (variable)
    63  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in mmap_bo()
    74  handle = gem_create(fd, OBJECT_SIZE);   in create_pointer()
    90  handle = gem_create(fd, OBJECT_SIZE);   in test_access()
    99  igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,   in test_access()
    103 igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,   in test_access()
    114 igt_assert(mmap64(0, OBJECT_SIZE, PROT_READ | PROT_WRITE,   in test_access()
    124 mmap_arg.handle = gem_create(fd, OBJECT_SIZE);   in test_short()
    128 for (pages = 1; pages <= OBJECT_SIZE / PAGE_SIZE; pages <<= 1) {   in test_short()
    160 memcpy(dst, src, OBJECT_SIZE);   in test_copy()
    [all …]

D | gem_streaming_writes.c |
    41  #define OBJECT_SIZE 1024*1024   (macro)
    79  exec[SRC].handle = gem_create(fd, OBJECT_SIZE);   in test_streaming()
    80  exec[DST].handle = gem_create(fd, OBJECT_SIZE);   in test_streaming()
    85  s = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE,   in test_streaming()
    89  s = gem_mmap__gtt(fd, src, OBJECT_SIZE,   in test_streaming()
    93  s = gem_mmap__wc(fd, src, 0, OBJECT_SIZE,   in test_streaming()
    99  d = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);   in test_streaming()
    143 batch = malloc(sizeof(*batch) * (OBJECT_SIZE / CHUNK_SIZE / 64));   in test_streaming()
    144 for (i = n = 0; i < OBJECT_SIZE / CHUNK_SIZE / 64; i++) {   in test_streaming()
    188 for (i = 0; i < OBJECT_SIZE/4; i++)   in test_streaming()
    [all …]

D | gem_set_tiling_vs_gtt.c |
    42  #define OBJECT_SIZE (1024*1024)   (macro)
    53  uint32_t data[OBJECT_SIZE/4];
    68  handle = gem_create(fd, OBJECT_SIZE);
    69  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
    73  for (i = 0; i < OBJECT_SIZE/4; i++)
    83  for (i = 0; i < OBJECT_SIZE/4; i++)
    89  for (i = 0; i < OBJECT_SIZE/4; i++)
    96  for (i = 0; i < OBJECT_SIZE/4; i++) {
    115 for (i = 0; i < OBJECT_SIZE/4; i++)
    124 for (i = 0; i < OBJECT_SIZE/4; i++)
    [all …]

D | gem_readwrite.c |
    40  #define OBJECT_SIZE 16384   (macro)
    80  uint8_t expected[OBJECT_SIZE];
    81  uint8_t buf[OBJECT_SIZE];
    89  handle = gem_create(fd, OBJECT_SIZE);
    94  ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
    102 ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
    111 ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
    113 ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
    122 ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);

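The do_read()/do_write() helpers referenced above are local to gem_readwrite.c; presumably they are thin wrappers around the raw pread/pwrite ioctls. A plausible reconstruction under that assumption is sketched below using only the DRM uapi structs and drmIoctl; the real helpers in the test may differ in detail.

#include <i915_drm.h>   /* DRM_IOCTL_I915_GEM_PREAD/PWRITE, via libdrm */
#include <xf86drm.h>    /* drmIoctl() */
#include <stdint.h>

/* Assumed shape of the test's do_read()/do_write() helpers. */
static int do_read(int fd, uint32_t handle, void *buf, uint64_t offset, uint64_t len)
{
	struct drm_i915_gem_pread pread = {
		.handle = handle,
		.offset = offset,
		.size = len,
		.data_ptr = (uintptr_t)buf,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
}

static int do_write(int fd, uint32_t handle, const void *buf, uint64_t offset, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite = {
		.handle = handle,
		.offset = offset,
		.size = len,
		.data_ptr = (uintptr_t)buf,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
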
D | gem_madvise.c |
    44  #define OBJECT_SIZE (1024*1024)   (macro)
    64  handle = gem_create(fd, OBJECT_SIZE);   in dontneed_before_mmap()
    66  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in dontneed_before_mmap()
    80  munmap(ptr, OBJECT_SIZE);   in dontneed_before_mmap()
    92  handle = gem_create(fd, OBJECT_SIZE);   in dontneed_after_mmap()
    93  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in dontneed_after_mmap()
    108 munmap(ptr, OBJECT_SIZE);   in dontneed_after_mmap()
    119 handle = gem_create(fd, OBJECT_SIZE);   in dontneed_before_pwrite()
    138 exec.handle = gem_create(fd, OBJECT_SIZE);   in dontneed_before_exec()

D | gem_fence_upload.c |
    43  #define OBJECT_SIZE (1024*1024) /* restricted to 1MiB alignment on i915 fences */   (macro)
    68  handle[n] = gem_create(fd, OBJECT_SIZE);   in performance()
    69  ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);   in performance()
    75  memset(ptr[n], 0, OBJECT_SIZE);   in performance()
    88  memset(ptr[n], 0, OBJECT_SIZE);   in performance()
    96  munmap(ptr[n], OBJECT_SIZE);   in performance()
    175 handle[n] = gem_create(fd, OBJECT_SIZE);   in thread_performance()
    176 ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);   in thread_performance()
    210 linear[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);   in thread_performance()
    231 tiled[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);   in thread_performance()
    [all …]

D | gem_fence_thrash.c |
    49  #define OBJECT_SIZE (128*1024) /* restricted to 1MiB alignment on i915 fences */   (macro)
    70  handle = gem_create(fd, OBJECT_SIZE);   in bo_create()
    73  ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE,   in bo_create()
    75  memset(ptr, 0, OBJECT_SIZE);   in bo_create()
    76  munmap(ptr, OBJECT_SIZE);   in bo_create()
    80  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in bo_create()
    100 memcpy (a, b, OBJECT_SIZE);   in bo_copy()
    104 munmap(a, OBJECT_SIZE);   in bo_copy()
    105 munmap(b, OBJECT_SIZE);   in bo_copy()
    126 unsigned int dwords = OBJECT_SIZE >> 2;   in _bo_write_verify()
    [all …]

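The bo_create() hits above clear the object through a CPU mapping and then switch to a GTT mapping. A sketch of that flow is below; the explicit gem_set_domain() calls and I915_GEM_DOMAIN_* constants are the usual igt/i915 idiom rather than something shown in the excerpt, and the helper name create_and_map_gtt is hypothetical.

#include "igt.h"
#include <string.h>
#include <sys/mman.h>

/* Hypothetical helper: clear an object via a cached CPU map, then hand back
 * a GTT map of it, mirroring the bo_create() hits above. */
static void *create_and_map_gtt(int fd, size_t sz, uint32_t *out_handle)
{
	uint32_t handle = gem_create(fd, sz);
	void *cpu, *gtt;

	cpu = gem_mmap__cpu(fd, handle, 0, sz, PROT_READ | PROT_WRITE);
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	memset(cpu, 0, sz);                   /* clear via the cached CPU map */
	munmap(cpu, sz);

	gtt = gem_mmap__gtt(fd, handle, sz, PROT_READ | PROT_WRITE);
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	*out_handle = handle;
	return gtt;                           /* caller munmaps and gem_closes */
}
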
D | gem_mmap.c |
    40  #define OBJECT_SIZE 16384   (macro)
    151 uint8_t expected[OBJECT_SIZE];
    152 uint8_t buf[OBJECT_SIZE];
    234 .handle = gem_create(fd, OBJECT_SIZE),
    235 .size = OBJECT_SIZE,
    248 gem_write(fd, arg.handle, 0, buf, OBJECT_SIZE);
    256 munmap(addr, OBJECT_SIZE);
    260 uint32_t handle = gem_create(fd, OBJECT_SIZE);
    262 igt_assert(OBJECT_SIZE > 4096);

D | gem_mmap_offset_exhaustion.c |
    43  #define OBJECT_SIZE (1024*1024)   (macro)
    61  handle = gem_create(fd, OBJECT_SIZE);   in create_and_map_bo()
    63  ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);   in create_and_map_bo()
    70  munmap(ptr, OBJECT_SIZE);   in create_and_map_bo()

D | gem_pwrite_pread.c |
    43  #define OBJECT_SIZE 16384   (macro)
    115 src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);   in as_gtt_mmap()
    116 dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);   in as_gtt_mmap()
    140 src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);   in as_cpu_mmap()
    141 dst_ptr = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);   in as_cpu_mmap()
    185 src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);   in test_as_gtt_mmap()
    186 dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);   in test_as_gtt_mmap()
    209 src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);   in test_as_cpu_mmap()
    210 dst_ptr = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);   in test_as_cpu_mmap()
    280 object_size = OBJECT_SIZE;

D | gem_close_race.c |
    45  #define OBJECT_SIZE (256 * 1024)   (macro)
    132 handle = gem_create(fd, OBJECT_SIZE);   in load()
    186 name.name = gem_flink(fd, gem_create(fd, OBJECT_SIZE));   in threads()

D | gem_exec_blt.c |
    43  #define OBJECT_SIZE 16384   (macro)
    327 run(fd, OBJECT_SIZE, false);
    330 run(fd, OBJECT_SIZE, false);
    333 run(fd, OBJECT_SIZE, true);

D | gem_exec_faulting_reloc.c |
    50  #define OBJECT_SIZE 16384   (macro)
    247 run(OBJECT_SIZE);
    250 run(OBJECT_SIZE);

D | gem_fenced_exec_thrash.c |
    39  #define OBJECT_SIZE (4*WIDTH*HEIGHT)   (macro)
    62  handle = gem_create(fd, OBJECT_SIZE);   in tiled_bo_create()

D | gem_pread.c |
    74  #define OBJECT_SIZE 16384   (macro)
    150 object_size = OBJECT_SIZE;

D | gem_pwrite.c |
    74  #define OBJECT_SIZE 16384   (macro)
    277 object_size = OBJECT_SIZE;

D | gem_gtt_speed.c |
    44  #define OBJECT_SIZE 16384   (macro)
    89  int size = OBJECT_SIZE;

/external/igt-gpu-tools/benchmarks/
D | gem_mmap.c |
    48  #define OBJECT_SIZE (1<<23)   (macro)
    63  void *buf = malloc(OBJECT_SIZE);   in main()
    118 handle = gem_create(fd, OBJECT_SIZE);   in main()
    121 ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);   in main()
    125 ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);   in main()
    129 ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);   in main()
    150 memset(dst, 0, OBJECT_SIZE);   in main()
    153 memcpy(dst, src, OBJECT_SIZE);   in main()
    166 memset(dst, 0, OBJECT_SIZE);   in main()
    169 munmap(ptr, OBJECT_SIZE);   in main()
    [all …]

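This benchmark picks one of the three mapping flavours (CPU, GTT, WC) and then bulk-copies into it. A minimal sketch of timing one such copy is below; the helper names time_copy and now, the mode selector, and the use of clock_gettime are illustrative assumptions, while the gem_mmap__* calls match the signatures shown in the hits above.

#include "igt.h"
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#define OBJECT_SIZE (1 << 23)   /* 8 MiB, as in the benchmark above */

static double now(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec * 1e-9;
}

/* Hypothetical helper: time a bulk memcpy into one mapping flavour of a GEM
 * object; 'mode' picks between the three map types used by the benchmark. */
static double time_copy(int fd, const void *src, char mode)
{
	uint32_t handle = gem_create(fd, OBJECT_SIZE);
	void *dst;
	double t;

	if (mode == 'c')
		dst = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
	else if (mode == 'g')
		dst = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
	else
		dst = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);

	t = now();
	memcpy(dst, src, OBJECT_SIZE);        /* the copy being measured */
	t = now() - t;

	munmap(dst, OBJECT_SIZE);
	gem_close(fd, handle);
	return t;
}
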
D | gem_prw.c |
    47  #define OBJECT_SIZE (1<<23)   (macro)
    60  void *buf = malloc(OBJECT_SIZE);   in main()
    93  handle = gem_create(fd, OBJECT_SIZE);   in main()
    94  for (size = 1; size <= OBJECT_SIZE; size <<= 1) {   in main()

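gem_prw.c sweeps transfer sizes in powers of two up to OBJECT_SIZE. The sketch below shows one way such a sweep can report pwrite bandwidth, assuming the igt gem_write helper seen elsewhere in this listing; the function name sweep_pwrite and the timing/printing details are illustrative, not the benchmark's actual code.

#include "igt.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define OBJECT_SIZE (1 << 23)

/* Hypothetical helper: sweep pwrite sizes in powers of two and report the
 * bandwidth of each transfer, in the spirit of gem_prw.c's main loop. */
static void sweep_pwrite(int fd)
{
	void *buf = malloc(OBJECT_SIZE);
	uint32_t handle = gem_create(fd, OBJECT_SIZE);
	struct timespec t0, t1;

	memset(buf, 0, OBJECT_SIZE);

	for (unsigned long size = 1; size <= OBJECT_SIZE; size <<= 1) {
		clock_gettime(CLOCK_MONOTONIC, &t0);
		gem_write(fd, handle, 0, buf, size);   /* pwrite 'size' bytes */
		clock_gettime(CLOCK_MONOTONIC, &t1);

		double s = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) * 1e-9;
		printf("%10lu bytes: %.3f MiB/s\n", size, size / s / (1024.0 * 1024));
	}

	gem_close(fd, handle);
	free(buf);
}
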
D | gem_create.c |
    48  #define OBJECT_SIZE (1<<23)   (macro)
    116 for (s = 4096; s <= OBJECT_SIZE; s <<= 1) {   in main()