/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Handle issues around the Tile "home cache" model of coherence.
 */

#ifndef _ASM_TILE_HOMECACHE_H
#define _ASM_TILE_HOMECACHE_H

#include <asm/page.h>
#include <linux/cpumask.h>

struct page;
struct task_struct;
struct vm_area_struct;
struct zone;

/*
 * Coherence point for the page is its memory controller.
 * It is not present in any cache (L1 or L2).
 */
#define PAGE_HOME_UNCACHED -1

/*
 * Is this page immutable (unwritable) and thus able to be cached more
 * widely than would otherwise be possible?  This means we have "nc" set.
 */
#define PAGE_HOME_IMMUTABLE -2

/*
 * Each cpu considers its own cache to be the home for the page,
 * which makes it incoherent.
 */
#define PAGE_HOME_INCOHERENT -3

/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
			 const struct cpumask *cache_cpumask,
			 HV_VirtAddr tlb_va, unsigned long tlb_length,
			 unsigned long tlb_pgsize,
			 const struct cpumask *tlb_cpumask,
			 HV_Remote_ASID *asids, int asidcount);
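
/*
 * Illustrative sketch (not part of the original header): a TLB-only
 * remote flush of a single kernel page, with the cache arguments left
 * empty and no remote ASIDs.  The address, size, and cpumask below are
 * hypothetical; real callers follow the conventions of the flush_tlb_*
 * helpers in arch/tile.
 *
 *	flush_remote(0, 0, NULL,
 *		     (HV_VirtAddr)va, PAGE_SIZE, PAGE_SIZE,
 *		     cpu_online_mask, NULL, 0);
 */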

/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
extern pte_t pte_set_home(pte_t pte, int home);
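
/*
 * Illustrative sketch (not part of the original header): mark a PTE as
 * hash-for-home before installing it; "pte" is a hypothetical local
 * variable of type pte_t.
 *
 *	pte = pte_set_home(pte, PAGE_HOME_HASH);
 */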

/* Do a cache eviction on the specified cpus. */
extern void homecache_evict(const struct cpumask *mask);
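
/*
 * Illustrative sketch (not part of the original header): evict the
 * local cpu's cache.  Assumes smp_processor_id() from <linux/smp.h>
 * and a context where preemption is disabled.
 *
 *	homecache_evict(cpumask_of(smp_processor_id()));
 */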

/*
 * Change a kernel page's homecache.  It must not be mapped in user space.
 * If !CONFIG_HOMECACHE, this is only usable on LOWMEM, may only be called
 * when no other cpu can reference the page, and causes a full-chip
 * cache/TLB flush.
 */
extern void homecache_change_page_home(struct page *, int order, int home);
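
/*
 * Illustrative sketch (not part of the original header): rehome a
 * freshly allocated, otherwise unreferenced lowmem page as uncached,
 * e.g. before handing it to a device.  Assumes alloc_pages() and
 * GFP_KERNEL from <linux/gfp.h>.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	if (page)
 *		homecache_change_page_home(page, 0, PAGE_HOME_UNCACHED);
 */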

/*
 * Flush a page out of whatever cache(s) it is in.
 * This is more than just finv, since it properly handles waiting
 * for the data to reach memory, but it can be quite
 * heavyweight, particularly on incoherent or immutable memory.
 */
extern void homecache_finv_page(struct page *);

/*
 * Flush a page out of the specified home cache.
 * Note that the specified home need not be the actual home of the page,
 * as for example might be the case when coordinating with I/O devices.
 */
extern void homecache_finv_map_page(struct page *, int home);
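
/*
 * Illustrative sketch (not part of the original header): push a page's
 * data out toward memory before a device reads it.  The second call
 * shows flushing at a home other than the page's own; the choice of
 * PAGE_HOME_HASH here is hypothetical.
 *
 *	homecache_finv_page(page);
 * or, when coordinating with a device that uses a different home:
 *	homecache_finv_map_page(page, PAGE_HOME_HASH);
 */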

/*
 * Allocate a page with the given GFP flags, home, and optionally
 * node.  These routines are actually just wrappers around the normal
 * alloc_pages() / alloc_pages_node() functions, which set and clear
 * a per-cpu variable to communicate with homecache_new_kernel_page().
 * If !CONFIG_HOMECACHE, uses homecache_change_page_home().
 */
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
					  unsigned int order, int home);
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					       unsigned int order, int home);
#define homecache_alloc_page(gfp_mask, home) \
  homecache_alloc_pages(gfp_mask, 0, home)
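
/*
 * Illustrative sketch (not part of the original header): allocate a
 * single hash-for-home page.  Assumes GFP_KERNEL from <linux/gfp.h>.
 *
 *	struct page *page = homecache_alloc_page(GFP_KERNEL, PAGE_HOME_HASH);
 *	if (!page)
 *		return -ENOMEM;
 */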

/*
 * These routines are just pass-throughs to free_pages() when
 * we support full homecaching.  If !CONFIG_HOMECACHE, then these
 * routines use homecache_change_page_home() to reset the home
 * back to the default before returning the page to the allocator.
 */
void __homecache_free_pages(struct page *, unsigned int order);
void homecache_free_pages(unsigned long addr, unsigned int order);
#define __homecache_free_page(page) __homecache_free_pages((page), 0)
#define homecache_free_page(page) homecache_free_pages((page), 0)
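
/*
 * Illustrative note (not part of the original header): the two flavors
 * mirror __free_pages() and free_pages() -- one takes a struct page *,
 * the other a kernel virtual address.  "page" and "addr" below are
 * hypothetical.
 *
 *	__homecache_free_page(page);
 *	homecache_free_pages(addr, 0);
 */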


/*
 * Report the page home for LOWMEM pages by examining their kernel PTE,
 * or for highmem pages as the default home.
 */
extern int page_home(struct page *);
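
/*
 * Illustrative sketch (not part of the original header): only bother
 * flushing a page if it is actually cached somewhere.
 *
 *	if (page_home(page) != PAGE_HOME_UNCACHED)
 *		homecache_finv_page(page);
 */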

#define homecache_migrate_kthread() do {} while (0)

#define homecache_kpte_lock() 0
#define homecache_kpte_unlock(flags) do {} while (0)


#endif /* _ASM_TILE_HOMECACHE_H */