/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

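/*
 * PCI IDs of the northbridge function 3 ("Miscellaneous Control") devices
 * on supported CPUs; one such device exists per node and is used to
 * enumerate the northbridges.
 */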
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
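/*
 * PCI IDs of the corresponding northbridge function 4 devices, tracked
 * as each node's "link" device.
 */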
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};

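/*
 * { bus, first device, one-past-last device } triples that the early PCI
 * code scans for northbridge devices: bus 0 devices 0x18-0x1f, plus buses
 * 0xff and 0xfe which larger multi-node systems may use.
 */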
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

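/*
 * Scan the PCI device list for the next device matching @ids, starting
 * after @dev (pass NULL to begin a new scan).  Returns NULL when no
 * further match exists.
 */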
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
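/*
 * Enumerate the northbridges once: count the "misc" (function 3) devices,
 * allocate amd_northbridges.nb[], pair each node with its misc and link
 * PCI devices, and record feature flags (GART, L3 index disable, L3
 * partitioning) for later users.  Subsequent calls return 0 without
 * rescanning.
 */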
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

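/*
 * Fill @res with the MMCONFIG aperture described by the
 * MSR_FAM10H_MMIO_CONF_BASE MSR.  Returns NULL on non-AMD or pre-fam10h
 * CPUs, or when MMCONFIG is disabled; otherwise returns @res.
 */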
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* Assume all CPUs from fam10h onwards have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* MMCONFIG is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

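/*
 * Return the bitmask of enabled L3 subcaches for the compute unit that
 * @cpu belongs to, read from the link device's L3 partitioning register
 * (offset 0x1d4).  Returns 0 if L3 partitioning is not supported.
 */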
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

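/*
 * Set the L3 subcache enable mask (bits 0-3 of @mask) for @cpu's compute
 * unit, dropping BAN mode while any subcache is disabled and restoring it
 * once partitioning returns to its reset state.  A caller might, for
 * instance, use amd_set_subcaches(cpu, 0x3) to leave only two subcaches
 * enabled.  Returns -EINVAL if partitioning is unsupported or @mask > 0xf.
 */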
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

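/*
 * Cache the original GART flush-word register values (offset 0x9c of each
 * misc device); amd_flush_garts() writes them back with the flush bit set.
 * Clears AMD_NB_GART on allocation failure.
 */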
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

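/*
 * Flush the GART TLB on every northbridge by setting the flush bit in
 * each cached flush word and polling until the hardware clears it again.
 * Typically used by the GART IOMMU and AGP code after GART PTE updates.
 */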
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

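/*
 * Enumerate the northbridges and cache the GART flush words at boot.
 * Runs at fs_initcall time so that the PCI subsystem is already up
 * (see the comment above the fs_initcall() below).
 */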
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);