/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

/* GART flush words cached by amd_cache_gart(), one per northbridge */
static u32 *flush_words;

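/*
 * PCI IDs of the northbridge "misc" (function 3) and "link" (function 4)
 * devices handled by this code.
 */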
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{}
};

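/*
 * Bus/device ranges scanned early for AMD northbridge devices:
 * { bus, first device, device limit }.
 */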
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

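/*
 * Return the next PCI device after @dev that matches one of the IDs in
 * @ids, or NULL once the whole device list has been walked.
 */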
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

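/*
 * Enumerate all northbridge misc (function 3) and link (function 4)
 * devices, allocate the amd_northbridges array and record which features
 * (GART, L3 cache index disable, L3 partitioning) the detected CPU
 * family supports.
 */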
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Check whether a 32-bit vendor/device word read from PCI config space
 * (vendor ID in the low 16 bits, device ID in the high 16 bits) matches
 * one of the known northbridge misc devices. Ignores subdevice/subvendor
 * but as far as I can figure out they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

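/*
 * Fill in @res with the PCI MMCONFIG aperture advertised by the
 * MSR_FAM10H_MMIO_CONF_BASE MSR. Returns NULL on non-AMD CPUs, on
 * pre-family-0x10 CPUs and when MMCONFIG is disabled.
 */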
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

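/*
 * Return the four-bit L3 subcache enable mask for the compute unit that
 * @cpu belongs to, read from the northbridge link device.
 */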
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

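/*
 * Set the L3 subcache enable mask for the compute unit of @cpu. BAN mode
 * in the misc device is switched off while any subcache is disabled and
 * is restored once L3 partitioning returns to its reset state.
 */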
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

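/*
 * Cache the GART flush words (misc device register 0x9c) of all
 * northbridges so amd_flush_garts() does not have to read them at
 * flush time.
 */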
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

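/*
 * Flush the GART TLB on every northbridge by writing the cached flush
 * word with bit 0 set and polling until the hardware clears it again.
 */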
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyway
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

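/*
 * Late init: enumerate the northbridges and set up the GART flush words.
 */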
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);