1 /* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
2    because MTRRs can span up to 40 bits (36 bits on most modern x86) */
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <asm/io.h>
8 #include <asm/mtrr.h>
9 #include <asm/msr.h>
10 #include <asm/system.h>
11 #include <asm/cpufeature.h>
12 #include <asm/processor-flags.h>
13 #include <asm/tlbflush.h>
14 #include <asm/pat.h>
15 #include "mtrr.h"
16 
17 struct fixed_range_block {
18 	int base_msr; /* start address of an MTRR block */
19 	int ranges;   /* number of MTRRs in this block  */
20 };
21 
22 static struct fixed_range_block fixed_range_blocks[] = {
23 	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
24 	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
25 	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
26 	{}
27 };
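/*
 * A quick sketch of the fixed-range layout these blocks describe (derived
 * from the MSR names above; the exact MSR numbers live in mtrr.h):
 *
 *	MTRRfix64K_00000         1 MSR  x 8 types of 64K = 0x00000-0x7FFFF
 *	MTRRfix16K_80000/A0000   2 MSRs x 8 types of 16K = 0x80000-0xBFFFF
 *	MTRRfix4K_C0000..F8000   8 MSRs x 8 types of  4K = 0xC0000-0xFFFFF
 *
 * i.e. 11 MSRs holding 88 one-byte types that cover the first 1MB of
 * physical memory.  mtrr_type_lookup() below indexes fixed_ranges[] with
 * exactly this layout; e.g. the VGA text buffer at 0xB8000 lands in the
 * 16K block: idx = 8 + ((0xB8000 - 0x80000) >> 14) = 22.
 */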
28 
29 static unsigned long smp_changes_mask;
30 static int mtrr_state_set;
31 u64 mtrr_tom2;
32 
33 struct mtrr_state_type mtrr_state = {};
34 EXPORT_SYMBOL_GPL(mtrr_state);
35 
36 static int __initdata mtrr_show;
37 static int __init mtrr_debug(char *opt)
38 {
39 	mtrr_show = 1;
40 	return 0;
41 }
42 early_param("mtrr.show", mtrr_debug);
43 
44 /*
45  * Returns the effective MTRR type for the region
46  * Error returns:
47  * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
48  * - 0xFF - when MTRR is not enabled
49  */
50 u8 mtrr_type_lookup(u64 start, u64 end)
51 {
52 	int i;
53 	u64 base, mask;
54 	u8 prev_match, curr_match;
55 
56 	if (!mtrr_state_set)
57 		return 0xFF;
58 
59 	if (!mtrr_state.enabled)
60 		return 0xFF;
61 
62 	/* Make end inclusive, instead of exclusive */
63 	end--;
64 
65 	/* Look in fixed ranges. Just return the type as per start */
66 	if (mtrr_state.have_fixed && (start < 0x100000)) {
67 		int idx;
68 
69 		if (start < 0x80000) {
70 			idx = 0;
71 			idx += (start >> 16);
72 			return mtrr_state.fixed_ranges[idx];
73 		} else if (start < 0xC0000) {
74 			idx = 1 * 8;
75 			idx += ((start - 0x80000) >> 14);
76 			return mtrr_state.fixed_ranges[idx];
77 		} else if (start < 0x1000000) {
78 			idx = 3 * 8;
79 			idx += ((start - 0xC0000) >> 12);
80 			return mtrr_state.fixed_ranges[idx];
81 		}
82 	}
83 
84 	/*
85 	 * Look in variable ranges
86 	 * Look for multiple ranges matching this address and pick the type
87 	 * as per MTRR precedence
88 	 */
89 	if (!(mtrr_state.enabled & 2)) {
90 		return mtrr_state.def_type;
91 	}
92 
93 	prev_match = 0xFF;
94 	for (i = 0; i < num_var_ranges; ++i) {
95 		unsigned short start_state, end_state;
96 
97 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
98 			continue;
99 
100 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
101 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
102 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
103 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
104 
105 		start_state = ((start & mask) == (base & mask));
106 		end_state = ((end & mask) == (base & mask));
107 		if (start_state != end_state)
108 			return 0xFE;
109 
110 		if ((start & mask) != (base & mask)) {
111 			continue;
112 		}
113 
114 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
115 		if (prev_match == 0xFF) {
116 			prev_match = curr_match;
117 			continue;
118 		}
119 
120 		if (prev_match == MTRR_TYPE_UNCACHABLE ||
121 		    curr_match == MTRR_TYPE_UNCACHABLE) {
122 			return MTRR_TYPE_UNCACHABLE;
123 		}
124 
125 		if ((prev_match == MTRR_TYPE_WRBACK &&
126 		     curr_match == MTRR_TYPE_WRTHROUGH) ||
127 		    (prev_match == MTRR_TYPE_WRTHROUGH &&
128 		     curr_match == MTRR_TYPE_WRBACK)) {
129 			prev_match = MTRR_TYPE_WRTHROUGH;
130 			curr_match = MTRR_TYPE_WRTHROUGH;
131 		}
132 
133 		if (prev_match != curr_match) {
134 			return MTRR_TYPE_UNCACHABLE;
135 		}
136 	}
137 
138 	if (mtrr_tom2) {
139 		if (start >= (1ULL<<32) && (end < mtrr_tom2))
140 			return MTRR_TYPE_WRBACK;
141 	}
142 
143 	if (prev_match != 0xFF)
144 		return prev_match;
145 
146 	return mtrr_state.def_type;
147 }
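/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical caller that wants a yes/no answer for "is this physical
 * range uniformly write-back?".  Both sentinel returns documented above
 * (0xFF = MTRRs disabled, 0xFE = range not covered by a single type)
 * count as "no".
 *
 *	static bool range_is_wb(u64 start, u64 end)
 *	{
 *		u8 type = mtrr_type_lookup(start, end);
 *
 *		if (type == 0xFF || type == 0xFE)
 *			return false;
 *		return type == MTRR_TYPE_WRBACK;
 *	}
 */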
148 
149 /*  Get the MSR pair relating to a var range  */
150 static void
151 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
152 {
153 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
154 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
155 }
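/*
 * Layout reminder for the MSR pair read above (Intel SDM, vol. 3, MTRR
 * chapter): MTRRphysBase keeps the memory type in bits 7:0 and the base
 * address from bit 12 up; MTRRphysMask keeps the valid bit in bit 11 and
 * the address mask from bit 12 up.  A worked example, assuming 36-bit
 * physical addressing: a 256MB write-back range at physical 0 reads back
 * as
 *
 *	base_lo = 0x00000006   (base 0x00000000, type WB = 6)
 *	base_hi = 0x00000000
 *	mask_lo = 0xf0000800   (mask 0xff0000000, valid bit set)
 *	mask_hi = 0x0000000f
 *
 * so an address A matches iff (A & 0xff0000000) == 0, i.e. A < 256MB.
 */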
156 
157 /*  fill the MSR pair relating to a var range  */
158 void fill_mtrr_var_range(unsigned int index,
159 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
160 {
161 	struct mtrr_var_range *vr;
162 
163 	vr = mtrr_state.var_ranges;
164 
165 	vr[index].base_lo = base_lo;
166 	vr[index].base_hi = base_hi;
167 	vr[index].mask_lo = mask_lo;
168 	vr[index].mask_hi = mask_hi;
169 }
170 
171 static void
172 get_fixed_ranges(mtrr_type * frs)
173 {
174 	unsigned int *p = (unsigned int *) frs;
175 	int i;
176 
177 	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
178 
179 	for (i = 0; i < 2; i++)
180 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
181 	for (i = 0; i < 8; i++)
182 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
183 }
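/*
 * Note on the cast above: frs points at mtrr_state.fixed_ranges[], an
 * array of 88 one-byte types.  Viewed as 32-bit words, p[0..1] receive
 * the single 64K MSR, p[2..5] the two 16K MSRs and p[6..21] the eight
 * 4K MSRs - 11 MSRs of 8 type bytes each, in the same order that
 * fixed_range_blocks[] and mtrr_type_lookup() assume.
 */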
184 
185 void mtrr_save_fixed_ranges(void *info)
186 {
187 	if (cpu_has_mtrr)
188 		get_fixed_ranges(mtrr_state.fixed_ranges);
189 }
190 
191 static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
192 {
193 	unsigned i;
194 
195 	for (i = 0; i < 8; ++i, ++types, base += step)
196 		printk(KERN_INFO "MTRR %05X-%05X %s\n",
197 			base, base + step - 1, mtrr_attrib_to_str(*types));
198 }
199 
200 static void prepare_set(void);
201 static void post_set(void);
202 
203 /*  Grab all of the MTRR state for this CPU into *state  */
204 void __init get_mtrr_state(void)
205 {
206 	unsigned int i;
207 	struct mtrr_var_range *vrs;
208 	unsigned lo, dummy;
209 	unsigned long flags;
210 
211 	vrs = mtrr_state.var_ranges;
212 
213 	rdmsr(MTRRcap_MSR, lo, dummy);
214 	mtrr_state.have_fixed = (lo >> 8) & 1;
215 
216 	for (i = 0; i < num_var_ranges; i++)
217 		get_mtrr_var_range(i, &vrs[i]);
218 	if (mtrr_state.have_fixed)
219 		get_fixed_ranges(mtrr_state.fixed_ranges);
220 
221 	rdmsr(MTRRdefType_MSR, lo, dummy);
222 	mtrr_state.def_type = (lo & 0xff);
223 	mtrr_state.enabled = (lo & 0xc00) >> 10;
224 
225 	if (amd_special_default_mtrr()) {
226 		unsigned low, high;
227 		/* TOP_MEM2 */
228 		rdmsr(MSR_K8_TOP_MEM2, low, high);
229 		mtrr_tom2 = high;
230 		mtrr_tom2 <<= 32;
231 		mtrr_tom2 |= low;
232 		mtrr_tom2 &= 0xffffff800000ULL;
233 	}
234 	if (mtrr_show) {
235 		int high_width;
236 
237 		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
238 		if (mtrr_state.have_fixed) {
239 			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
240 			       mtrr_state.enabled & 1 ? "en" : "dis");
241 			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
242 			for (i = 0; i < 2; ++i)
243 				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
244 			for (i = 0; i < 8; ++i)
245 				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
246 		}
247 		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
248 		       mtrr_state.enabled & 2 ? "en" : "dis");
249 		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
250 		for (i = 0; i < num_var_ranges; ++i) {
251 			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
252 				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
253 				       i,
254 				       high_width,
255 				       mtrr_state.var_ranges[i].base_hi,
256 				       mtrr_state.var_ranges[i].base_lo >> 12,
257 				       high_width,
258 				       mtrr_state.var_ranges[i].mask_hi,
259 				       mtrr_state.var_ranges[i].mask_lo >> 12,
260 				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
261 			else
262 				printk(KERN_INFO "MTRR %u disabled\n", i);
263 		}
264 		if (mtrr_tom2) {
265 			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
266 					  mtrr_tom2, mtrr_tom2>>20);
267 		}
268 	}
269 	mtrr_state_set = 1;
270 
271 	/* PAT setup for the boot processor (BP). We need to go through sync steps here */
272 	local_irq_save(flags);
273 	prepare_set();
274 
275 	pat_init();
276 
277 	post_set();
278 	local_irq_restore(flags);
279 
280 }
281 
282 /*  Some BIOSes are broken and don't set all MTRRs the same!  */
283 void __init mtrr_state_warn(void)
284 {
285 	unsigned long mask = smp_changes_mask;
286 
287 	if (!mask)
288 		return;
289 	if (mask & MTRR_CHANGE_MASK_FIXED)
290 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
291 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
292 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
293 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
294 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
295 	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
296 	printk(KERN_INFO "mtrr: corrected configuration.\n");
297 }
298 
299 /* Doesn't attempt to pass an error out to MTRR users,
300    because it's quite complicated in some cases and probably not
301    worth it: the best error handling is to ignore it. */
302 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
303 {
304 	if (wrmsr_safe(msr, a, b) < 0)
305 		printk(KERN_ERR
306 			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
307 			smp_processor_id(), msr, a, b);
308 }
309 
310 /**
311  * k8_enable_fixed_iorrs - Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
312  * See AMD publication no. 24593, chapter 3.2.1 for more information.
313  */
314 static inline void k8_enable_fixed_iorrs(void)
315 {
316 	unsigned lo, hi;
317 
318 	rdmsr(MSR_K8_SYSCFG, lo, hi);
319 	mtrr_wrmsr(MSR_K8_SYSCFG, lo
320 				| K8_MTRRFIXRANGE_DRAM_ENABLE
321 				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
322 }
323 
324 /**
325  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
326  * @msr: MSR address of the MTRR which should be checked and updated
327  * @changed: pointer which indicates whether the MTRR needed to be changed
328  * @msrwords: pointer to the MSR values which the MSR should have
329  *
330  * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
331  * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
332  */
333 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
334 {
335 	unsigned lo, hi;
336 
337 	rdmsr(msr, lo, hi);
338 
339 	if (lo != msrwords[0] || hi != msrwords[1]) {
340 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
341 		    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
342 		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
343 			k8_enable_fixed_iorrs();
344 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
345 		*changed = true;
346 	}
347 }
348 
349 /**
350  * generic_get_free_region - Get a free MTRR.
351  * @base: The starting (base) address of the region.
352  * @size: The size (in bytes) of the region.
353  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
354  *
355  * Returns: The index of the region on success, else negative on error.
356  */
357 int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
358 {
359 	int i, max;
360 	mtrr_type ltype;
361 	unsigned long lbase, lsize;
362 
363 	max = num_var_ranges;
364 	if (replace_reg >= 0 && replace_reg < max)
365 		return replace_reg;
366 	for (i = 0; i < max; ++i) {
367 		mtrr_if->get(i, &lbase, &lsize, &ltype);
368 		if (lsize == 0)
369 			return i;
370 	}
371 	return -ENOSPC;
372 }
373 
374 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
375 			     unsigned long *size, mtrr_type *type)
376 {
377 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
378 	unsigned int tmp, hi;
379 
380 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
381 	if ((mask_lo & 0x800) == 0) {
382 		/*  Invalid (i.e. free) range  */
383 		*base = 0;
384 		*size = 0;
385 		*type = 0;
386 		return;
387 	}
388 
389 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
390 
391 	/* Work out the shifted address mask. */
392 	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
393 	mask_lo = size_or_mask | tmp;
394 	/* Expand tmp: set all bits above its highest set bit to 1 */
395 	hi = fls(tmp);
396 	if (hi > 0) {
397 		tmp |= ~((1<<(hi - 1)) - 1);
398 
399 		if (tmp != mask_lo) {
400 			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
401 			mask_lo = tmp;
402 		}
403 	}
404 
405 	/* This works correctly if size is a power of two, i.e. a
406 	   contiguous range. */
407 	*size = -mask_lo;
408 	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
409 	*type = base_lo & 0xff;
410 }
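/*
 * Worked example of the size recovery above, assuming 4K pages and 36-bit
 * physical addressing (size_or_mask = 0xff000000 in page units): for a
 * 256MB range the hardware mask is 0xff0000000 bytes, so
 *
 *	tmp     = 0x00ff0000         (mask shifted down to page units)
 *	mask_lo = 0xff000000 | tmp   = 0xffff0000
 *	*size   = -mask_lo           = 0x00010000 pages = 256MB
 *
 * which is why, as noted above, the calculation is only exact for
 * power-of-two (contiguous) sizes.
 */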
411 
412 /**
413  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
414  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
415  */
416 static int set_fixed_ranges(mtrr_type * frs)
417 {
418 	unsigned long long *saved = (unsigned long long *) frs;
419 	bool changed = false;
420 	int block=-1, range;
421 
422 	while (fixed_range_blocks[++block].ranges)
423 	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
424 		set_fixed_range(fixed_range_blocks[block].base_msr + range,
425 		    &changed, (unsigned int *) saved++);
426 
427 	return changed;
428 }
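/*
 * The loop above is driven by fixed_range_blocks[]: one 64K MSR, then two
 * consecutive 16K MSRs, then eight consecutive 4K MSRs.  `saved' advances
 * by one 64-bit word per MSR, so it walks the snapshot in exactly the
 * order get_fixed_ranges() filled it.
 */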
429 
430 /*  Set the MSR pair relating to a var range. Returns TRUE if
431     changes are made  */
432 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
433 {
434 	unsigned int lo, hi;
435 	bool changed = false;
436 
437 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
438 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
439 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
440 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
441 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
442 		changed = true;
443 	}
444 
445 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
446 
447 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
448 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
449 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
450 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
451 		changed = true;
452 	}
453 	return changed;
454 }
455 
456 static u32 deftype_lo, deftype_hi;
457 
458 /**
459  * set_mtrr_state - Set the MTRR state for this CPU.
460  *
461  * NOTE: The CPU must already be in a safe state for MTRR changes.
462  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
463  */
464 static unsigned long set_mtrr_state(void)
465 {
466 	unsigned int i;
467 	unsigned long change_mask = 0;
468 
469 	for (i = 0; i < num_var_ranges; i++)
470 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
471 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
472 
473 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
474 		change_mask |= MTRR_CHANGE_MASK_FIXED;
475 
476 	/*  Set_mtrr_restore restores the old value of MTRRdefType,
477 	   so to set it we fiddle with the saved value  */
478 	if ((deftype_lo & 0xff) != mtrr_state.def_type
479 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
480 		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
481 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
482 	}
483 
484 	return change_mask;
485 }
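/*
 * Bit layout of MTRRdefType used above (per the Intel SDM): bits 7:0 hold
 * the default memory type, bit 10 is FE (fixed-range enable) and bit 11
 * is E (global MTRR enable), hence the 0xcff mask and the << 10 shift;
 * after the shift, mtrr_state.enabled bit 0 mirrors FE and bit 1 mirrors E.
 */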
486 
487 
488 static unsigned long cr4 = 0;
489 static DEFINE_SPINLOCK(set_atomicity_lock);
490 
491 /*
492  * Since we are disabling the cache don't allow any interrupts - they
493  * would run extremely slow and would only increase the pain.  The caller must
494  * ensure that local interrupts are disabled and are reenabled after post_set()
495  * has been called.
496  */
497 
498 static void prepare_set(void) __acquires(set_atomicity_lock)
499 {
500 	unsigned long cr0;
501 
502 	/*  Note that this is not ideal, since the cache is only flushed/disabled
503 	   for this CPU while the MTRRs are changed, but changing this requires
504 	   more invasive changes to the way the kernel boots  */
505 
506 	spin_lock(&set_atomicity_lock);
507 
508 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
509 	cr0 = read_cr0() | X86_CR0_CD;
510 	write_cr0(cr0);
511 	wbinvd();
512 
513 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
514 	if ( cpu_has_pge ) {
515 		cr4 = read_cr4();
516 		write_cr4(cr4 & ~X86_CR4_PGE);
517 	}
518 
519 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
520 	__flush_tlb();
521 
522 	/*  Save MTRR state */
523 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
524 
525 	/*  Disable MTRRs, and set the default type to uncached  */
526 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
527 }
528 
529 static void post_set(void) __releases(set_atomicity_lock)
530 {
531 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
532 	__flush_tlb();
533 
534 	/* Intel (P6) standard MTRRs */
535 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
536 
537 	/*  Enable caches  */
538 	write_cr0(read_cr0() & 0xbfffffff);
539 
540 	/*  Restore value of CR4  */
541 	if ( cpu_has_pge )
542 		write_cr4(cr4);
543 	spin_unlock(&set_atomicity_lock);
544 }
545 
546 static void generic_set_all(void)
547 {
548 	unsigned long mask, count;
549 	unsigned long flags;
550 
551 	local_irq_save(flags);
552 	prepare_set();
553 
554 	/* Actually set the state */
555 	mask = set_mtrr_state();
556 
557 	/* also set PAT */
558 	pat_init();
559 
560 	post_set();
561 	local_irq_restore(flags);
562 
563 	/*  Use the atomic bitops to update the global mask  */
564 	for (count = 0; count < sizeof mask * 8; ++count) {
565 		if (mask & 0x01)
566 			set_bit(count, &smp_changes_mask);
567 		mask >>= 1;
568 	}
569 
570 }
571 
572 static void generic_set_mtrr(unsigned int reg, unsigned long base,
573 			     unsigned long size, mtrr_type type)
574 /*  [SUMMARY] Set variable MTRR register on the local CPU.
575     <reg> The register to set.
576     <base> The base address of the region.
577     <size> The size of the region. If this is 0 the region is disabled.
578     <type> The type of the region.
579     [RETURNS] Nothing.
580 */
581 {
582 	unsigned long flags;
583 	struct mtrr_var_range *vr;
584 
585 	vr = &mtrr_state.var_ranges[reg];
586 
587 	local_irq_save(flags);
588 	prepare_set();
589 
590 	if (size == 0) {
591 		/* The invalid bit is kept in the mask, so we simply clear the
592 		   relevant mask register to disable a range. */
593 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
594 		memset(vr, 0, sizeof(struct mtrr_var_range));
595 	} else {
596 		vr->base_lo = base << PAGE_SHIFT | type;
597 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
598 		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
599 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
600 
601 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
602 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
603 	}
604 
605 	post_set();
606 	local_irq_restore(flags);
607 }
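/*
 * Encoding example for the non-zero-size path above (base and size arrive
 * in pages; 4K pages and 36-bit physical addressing assumed): a 64MB
 * write-combining region at physical 0xd0000000 comes in as base = 0xd0000,
 * size = 0x4000, type = MTRR_TYPE_WRCOMB and is written out as
 *
 *	base_lo = 0xd0000001   (address bits | type WC = 1)
 *	mask_lo = 0xfc000800   (-size in pages, shifted, plus the valid bit)
 *
 * with base_hi/mask_hi carrying any physical-address bits above 4GB.
 */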
608 
609 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
610 {
611 	unsigned long lbase, last;
612 
613 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
614 	    and not touch 0x70000000->0x7003FFFF */
615 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
616 	    boot_cpu_data.x86_model == 1 &&
617 	    boot_cpu_data.x86_mask <= 7) {
618 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
619 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
620 			return -EINVAL;
621 		}
622 		if (!(base + size < 0x70000 || base > 0x7003F) &&
623 		    (type == MTRR_TYPE_WRCOMB
624 		     || type == MTRR_TYPE_WRBACK)) {
625 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
626 			return -EINVAL;
627 		}
628 	}
629 
630 	/*  Check upper bits of base and last are equal and lower bits are 0
631 	    for base and 1 for last  */
632 	last = base + size - 1;
633 	for (lbase = base; !(lbase & 1) && (last & 1);
634 	     lbase = lbase >> 1, last = last >> 1) ;
635 	if (lbase != last) {
636 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
637 		       base, size);
638 		return -EINVAL;
639 	}
640 	return 0;
641 }
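/*
 * The shift loop above is a compact alignment test: it shifts both values
 * right while base ends in a 0 bit and last ends in a 1 bit, so they can
 * only meet if the region is a power of two in size and naturally aligned.
 * For example base = 0x100, size = 0x100 pages gives last = 0x1ff; after
 * eight shifts both are 1 and the request passes.  With size = 0xc0 pages
 * instead, last = 0x1bf and the loop stops at lbase = 4, last = 6, so the
 * request is rejected with -EINVAL.
 */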
642 
643 
644 static int generic_have_wrcomb(void)
645 {
646 	unsigned long config, dummy;
647 	rdmsr(MTRRcap_MSR, config, dummy);
648 	return (config & (1 << 10));
649 }
650 
651 int positive_have_wrcomb(void)
652 {
653 	return 1;
654 }
655 
656 /* generic structure...
657  */
658 struct mtrr_ops generic_mtrr_ops = {
659 	.use_intel_if      = 1,
660 	.set_all	   = generic_set_all,
661 	.get               = generic_get_mtrr,
662 	.get_free_region   = generic_get_free_region,
663 	.set               = generic_set_mtrr,
664 	.validate_add_page = generic_validate_add_page,
665 	.have_wrcomb       = generic_have_wrcomb,
666 };
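/*
 * Rough sketch of how this ops table is consumed (the dispatch lives in
 * mtrr/main.c behind the mtrr_if pointer; the exact flow varies by kernel
 * version, so treat this as illustrative only):
 *
 *	if (mtrr_if->validate_add_page(base, size, type) == 0) {
 *		int reg = mtrr_if->get_free_region(base, size, -1);
 *
 *		if (reg >= 0)
 *			mtrr_if->set(reg, base, size, type);
 *	}
 */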
667