• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #ifndef __ASM_ES7000_APIC_H
2 #define __ASM_ES7000_APIC_H
3 
4 #include <linux/gfp.h>
5 
6 #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
7 #define esr_disable (1)
8 
/* The ES7000 local APIC is always considered registered/usable. */
static inline int apic_id_registered(void)
{
	return 1;
}
13 
/*
 * Cluster mode: interrupts may be delivered to any CPU, so the
 * target set is the all-CPUs mask.
 */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
18 
/*
 * Non-cluster (flat/physical) mode: direct interrupts only at the
 * CPU currently executing this code.
 */
static inline const cpumask_t *target_cpus(void)
{
	/* NOTE(review): takes the address of the cpumask_of_cpu() result —
	 * assumes the macro yields a long-lived lvalue, not a temporary. */
	return &cpumask_of_cpu(smp_processor_id());
}
23 
24 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
25 #define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
26 #define INT_DEST_MODE_CLUSTER		(1) /* logical delivery broadcast to all procs */
27 #define NO_BALANCE_IRQ_CLUSTER		(1)
28 
29 #define APIC_DFR_VALUE		(APIC_DFR_FLAT)
30 #define INT_DELIVERY_MODE	(dest_Fixed)
31 #define INT_DEST_MODE		(0)    /* phys delivery to target procs */
32 #define NO_BALANCE_IRQ		(0)
33 #undef  APIC_DEST_LOGICAL
34 #define APIC_DEST_LOGICAL	0x0
35 
/*
 * ES7000 never marks APIC IDs as reserved in the physid bitmap,
 * so an ID is never reported as already in use.
 */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* Non-zero when @bit is set in the map of present physical APIC IDs. */
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
44 
45 #define apicid_cluster(apicid) (apicid & 0xF0)
46 
/*
 * Compute the value to program into the APIC Logical Destination
 * Register for @cpu: the CPU's BIOS-reported APIC ID shifted into
 * the LDR's logical-ID field.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;
	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
53 
54 /*
55  * Set up the logical destination ID.
56  *
57  * Intel recommends to set DFR, LdR and TPR before enabling
58  * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
59  * document number 292116).  So here it goes...
60  */
init_apic_ldr_cluster(void)61 static inline void init_apic_ldr_cluster(void)
62 {
63 	unsigned long val;
64 	int cpu = smp_processor_id();
65 
66 	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
67 	val = calculate_ldr(cpu);
68 	apic_write(APIC_LDR, val);
69 }
70 
init_apic_ldr(void)71 static inline void init_apic_ldr(void)
72 {
73 	unsigned long val;
74 	int cpu = smp_processor_id();
75 
76 	apic_write(APIC_DFR, APIC_DFR_VALUE);
77 	val = calculate_ldr(cpu);
78 	apic_write(APIC_LDR, val);
79 }
80 
extern int apic_version [MAX_APICS];
/*
 * Report the APIC routing mode chosen at boot.  APIC version 0x14 is
 * labelled "Physical Cluster", anything else "Logical Cluster".
 * This only prints — no routing state is changed here.
 */
static inline void setup_apic_routing(void)
{
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
			"Physical Cluster" : "Logical Cluster",
			nr_ioapics, cpus_addr(*target_cpus())[0]);
}
90 
/* ES7000 has no multiple-timer quirk; never reroute the timer IRQ. */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
95 
/* Map a logical APIC ID to a NUMA node; ES7000 pins everything to node 0. */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
100 
101 
cpu_present_to_apicid(int mps_cpu)102 static inline int cpu_present_to_apicid(int mps_cpu)
103 {
104 	if (!mps_cpu)
105 		return boot_cpu_physical_apicid;
106 	else if (mps_cpu < nr_cpu_ids)
107 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
108 	else
109 		return BAD_APICID;
110 }
111 
/*
 * Build the physid mask for the next CPU reported present.
 *
 * NOTE(review): the @phys_apicid argument is ignored — a static
 * counter hands out sequential IDs instead, so this relies on being
 * called exactly once per CPU, in order, from a single context.
 * Confirm against callers before reusing elsewhere.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;
	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
120 
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* CPUs beyond the configured limit have no logical APIC ID. */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	/* UP build: only the current (boot) CPU exists. */
	return logical_smp_processor_id();
#endif
}
133 
/*
 * Map I/O APIC physical IDs into a present-ID mask.
 * For clustered we don't have a good way to do this yet - hack:
 * claim the low 8 IDs regardless of @phys_map.
 */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	return physids_promote(0xff);
}
139 
140 
/* No port-I/O remapping is needed on ES7000; intentionally empty. */
static inline void setup_portio_remap(void)
{
}
144 
extern unsigned int boot_cpu_physical_apicid;
/*
 * Always reports the CPU as present.  Side effect: refreshes
 * boot_cpu_physical_apicid from the local APIC's ID register, so
 * this is not a pure predicate — presumably callers depend on the
 * refresh; verify before changing.
 */
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}
151 
152 static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask * cpumask)153 cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
154 {
155 	int num_bits_set;
156 	int cpus_found = 0;
157 	int cpu;
158 	int apicid;
159 
160 	num_bits_set = cpumask_weight(cpumask);
161 	/* Return id to all */
162 	if (num_bits_set == nr_cpu_ids)
163 		return 0xFF;
164 	/*
165 	 * The cpus in the mask must all be on the apic cluster.  If are not
166 	 * on the same apicid cluster return default value of TARGET_CPUS.
167 	 */
168 	cpu = cpumask_first(cpumask);
169 	apicid = cpu_to_logical_apicid(cpu);
170 	while (cpus_found < num_bits_set) {
171 		if (cpumask_test_cpu(cpu, cpumask)) {
172 			int new_apicid = cpu_to_logical_apicid(cpu);
173 			if (apicid_cluster(apicid) !=
174 					apicid_cluster(new_apicid)){
175 				printk ("%s: Not a valid mask!\n", __func__);
176 				return 0xFF;
177 			}
178 			apicid = new_apicid;
179 			cpus_found++;
180 		}
181 		cpu++;
182 	}
183 	return apicid;
184 }
185 
cpu_mask_to_apicid(const cpumask_t * cpumask)186 static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
187 {
188 	int num_bits_set;
189 	int cpus_found = 0;
190 	int cpu;
191 	int apicid;
192 
193 	num_bits_set = cpus_weight(*cpumask);
194 	/* Return id to all */
195 	if (num_bits_set == nr_cpu_ids)
196 		return cpu_to_logical_apicid(0);
197 	/*
198 	 * The cpus in the mask must all be on the apic cluster.  If are not
199 	 * on the same apicid cluster return default value of TARGET_CPUS.
200 	 */
201 	cpu = first_cpu(*cpumask);
202 	apicid = cpu_to_logical_apicid(cpu);
203 	while (cpus_found < num_bits_set) {
204 		if (cpu_isset(cpu, *cpumask)) {
205 			int new_apicid = cpu_to_logical_apicid(cpu);
206 			if (apicid_cluster(apicid) !=
207 					apicid_cluster(new_apicid)){
208 				printk ("%s: Not a valid mask!\n", __func__);
209 				return cpu_to_logical_apicid(0);
210 			}
211 			apicid = new_apicid;
212 			cpus_found++;
213 		}
214 		cpu++;
215 	}
216 	return apicid;
217 }
218 
219 
cpu_mask_to_apicid_and(const struct cpumask * inmask,const struct cpumask * andmask)220 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
221 						  const struct cpumask *andmask)
222 {
223 	int apicid = cpu_to_logical_apicid(0);
224 	cpumask_var_t cpumask;
225 
226 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
227 		return apicid;
228 
229 	cpumask_and(cpumask, inmask, andmask);
230 	cpumask_and(cpumask, cpumask, cpu_online_mask);
231 	apicid = cpu_mask_to_apicid(cpumask);
232 
233 	free_cpumask_var(cpumask);
234 	return apicid;
235 }
236 
/*
 * Extract the physical package ID from an APIC ID by discarding the
 * low @index_msb bits (the core/thread portion).
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
241 
242 #endif /* __ASM_ES7000_APIC_H */
243