/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>

#include <linux/ion_test.h>

#include <gtest/gtest.h>

#include <ion/ion.h>

#include "ion_test_fixture.h"

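/* Round x up to the next multiple of y; y must be a power of two. */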
#define ALIGN(x,y) (((x) + ((y) - 1)) & ~((y) - 1))

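/*
 * Fixture that drives the kernel's ion-test driver (/dev/ion-test).
 * The driver attaches to an ION buffer fd and copies data between a
 * user pointer and the buffer through either a kernel CPU mapping or
 * a DMA mapping, which lets these tests check cache coherency between
 * the userspace mmap() view and the kernel/device view of the buffer.
 */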
class Device : public IonAllHeapsTest {
 public:
    virtual void SetUp();
    virtual void TearDown();
    int m_deviceFd;
    void readDMA(int fd, void *buf, size_t size);
    void writeDMA(int fd, void *buf, size_t size);
    void readKernel(int fd, void *buf, size_t size);
    void writeKernel(int fd, void *buf, size_t size);
    void blowCache();
    void dirtyCache(void *ptr, size_t size);
};

void Device::SetUp()
{
    IonAllHeapsTest::SetUp();
    m_deviceFd = open("/dev/ion-test", O_RDONLY);
    ASSERT_GE(m_deviceFd, 0);
}

void Device::TearDown()
{
    ASSERT_EQ(0, close(m_deviceFd));
    IonAllHeapsTest::TearDown();
}

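/* Read the ION buffer behind fd into buf through the test driver's DMA mapping. */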
void Device::readDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

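/* Write buf into the ION buffer behind fd through the test driver's DMA mapping. */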
void Device::writeDMA(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

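/* Read the ION buffer behind fd into buf through a kernel CPU mapping. */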
void Device::readKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 0,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

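/* Write buf into the ION buffer behind fd through a kernel CPU mapping. */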
void Device::writeKernel(int fd, void *buf, size_t size)
{
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
    struct ion_test_rw_data ion_test_rw_data = {
            .ptr = (uint64_t)buf,
            .offset = 0,
            .size = size,
            .write = 1,
    };

    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
    ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}

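/*
 * Evict buffer contents from the CPU caches by streaming a working set
 * intended to be larger than any level of cache.
 */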
void Device::blowCache()
{
    const size_t bigger_than_cache = 8*1024*1024;
    void *buf1 = malloc(bigger_than_cache);
    void *buf2 = malloc(bigger_than_cache);
    memset(buf1, 0xaa, bigger_than_cache);
    memcpy(buf2, buf1, bigger_than_cache);
    free(buf1);
    free(buf2);
}

void Device::dirtyCache(void *ptr, size_t size)
{
    /*
     * Try to dirty the cache lines covering the buffer: the volatile
     * read pulls each line into the cache, and the write marks it
     * dirty, leaving unflushed data sitting in the CPU cache.
     */
    for (size_t i = size-1; i > 0; i--) {
        ((volatile char *)ptr)[i];
        ((char *)ptr)[i] = i;
    }
}

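/*
 * Each test below allocates a one-page ION buffer from every available
 * heap, mmap()s it, and verifies that data crosses correctly between the
 * userspace mapping and the kernel or DMA view. The scratch buffer is
 * deliberately placed 1024 bytes past a page boundary so the kernel
 * paths are exercised with a misaligned user pointer; the byte at
 * buf[4096] serves as a guard that the kernel read tests check against
 * overruns.
 */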
TEST_F(Device, KernelReadCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWriteCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAReadCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWriteCached)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

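/*
 * ION_FLAG_CACHED_NEEDS_SYNC buffers are cached, but ION performs no
 * automatic cache maintenance: userspace is expected to call
 * ion_sync_fd() to flush the CPU cache around DMA, as the DMA tests
 * below do.
 */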
TEST_F(Device, KernelReadCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWriteCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAReadCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ASSERT_EQ(0, ion_sync_fd(m_ionFd, map_fd));

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWriteCachedNeedsSync)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        ASSERT_EQ(0, ion_sync_fd(m_ionFd, map_fd));

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}
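
/*
 * The remaining tests repeat the same checks with flags = 0, i.e.
 * uncached buffers, which should stay coherent without any explicit
 * cache maintenance.
 */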
TEST_F(Device, KernelRead)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        ((char*)buf)[4096] = 0x12;
        readKernel(map_fd, buf, 4096);
        ASSERT_EQ(((char*)buf)[4096], 0x12);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, KernelWrite)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeKernel(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMARead)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        for (int i = 0; i < 4096; i++)
            ((char *)ptr)[i] = i;

        readDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)buf)[i]);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

TEST_F(Device, DMAWrite)
{
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = 0;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}

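/*
 * Check that a cached buffer really is CPU-cached: dirty it through the
 * CPU mapping, then DMA-read it back without syncing. The DMA copy
 * should see stale data, so at least one sampled byte must differ from
 * the pattern left in the cache.
 */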
TEST_F(Device, IsCached)
{
    auto buf_ptr = std::make_unique<char[]>(4096);
    void *buf = buf_ptr.get();

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        ASSERT_TRUE(ptr != MAP_FAILED);

        dirtyCache(ptr, 4096);

        readDMA(map_fd, buf, 4096);

        bool same = true;
        for (int i = 4096-16; i >= 0; i -= 16)
            if (((char *)buf)[i] != (char)i)
                same = false;
        ASSERT_FALSE(same);

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}