/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef GEN_CLFLUSH_H
#define GEN_CLFLUSH_H

#include <stddef.h>
#include <stdint.h>

#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

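/* On non-x86 builds the clflush/mfence builtins used below do not exist, so
 * stub them out as empty macros; the flushes then compile to no-ops and the
 * #warning flags the unsupported architecture.
 */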
#if !defined(__i686__) && !defined(__x86_64__)
#warning Unsupported architecture
#define __builtin_ia32_mfence()
#define __builtin_ia32_clflush(vaddr)
#endif

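/* clflush every cacheline that overlaps [start, start + size), starting from
 * the cacheline-aligned address at or below start.
 */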
static inline void
gen_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

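/* Write a range back to memory: the mfence orders any preceding stores ahead
 * of the clflushes that push the dirty cachelines out of the CPU caches.
 */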
static inline void
gen_flush_range(void *start, size_t size)
{
   __builtin_ia32_mfence();
   gen_clflush_range(start, size);
}

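/* Invalidate a range before reading data written by another (non-coherent)
 * agent: clflush drops the possibly-stale cachelines so later CPU reads fetch
 * fresh data from memory.
 */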
static inline void
gen_invalidate_range(void *start, size_t size)
{
   gen_clflush_range(start, size);

   /* Modern Atom CPUs (Baytrail+) have issues with clflush serialization,
    * where mfence is not a sufficient synchronization barrier.  We must
    * double clflush the last cacheline.  This guarantees it will be ordered
    * after the preceding clflushes, and then the mfence guards against
    * prefetches crossing the clflush boundary.
    *
    * See kernel commit 396f5d62d1a5fd99421855a08ffdef8edb43c76e
    * ("drm: Restore double clflush on the last partial cacheline")
    * and https://bugs.freedesktop.org/show_bug.cgi?id=92845.
    */
   __builtin_ia32_clflush(start + size - 1);
   __builtin_ia32_mfence();
}

#endif
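
A minimal usage sketch (not part of the header above), assuming a hypothetical
non-coherent buffer shared between the CPU and a device that accesses memory
without snooping the CPU caches; `buf`, `src`, `len`, the function names and
the "gen_clflush.h" include path are illustrative assumptions, not code from
the listing:

#include <string.h>
#include "gen_clflush.h"

/* CPU fills the buffer, then pushes the dirty cachelines out to memory so a
 * device reading through memory (bypassing the CPU caches) sees the data. */
static void
fill_and_flush(void *buf, const void *src, size_t len)
{
   memcpy(buf, src, len);
   gen_flush_range(buf, len);
}

/* Before the CPU reads results the device wrote into the buffer, drop any
 * stale cachelines so the reads miss to memory and observe the new data. */
static void
invalidate_before_read(void *buf, size_t len)
{
   gen_invalidate_range(buf, len);
}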