#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>

DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);

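/* Use this genapic only on CPUs that advertise x2apic support. */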
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (cpu_has_x2apic)
		return 1;

	return 0;
}

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

static const struct cpumask *x2apic_target_cpus(void)
{
	return cpumask_of(0);
}

/*
 * For now, each logical CPU is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
				   unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * send the IPI.
	 */
	x2apic_icr_write(cfg, apicid);
}

/*
 * For now, we send the IPIs one by one in the cpumask.
 * TBD: Based on the cpu mask, we can send the IPIs to the cluster group
 * at once. We have 16 CPUs in a cluster. This will minimize IPI register
 * writes.
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long query_cpu;

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
				vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	local_irq_save(flags);
	for_each_online_cpu(query_cpu)
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
				vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	return BAD_APICID;
}

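/*
 * In x2apic mode the APIC ID is a full 32-bit value, so reading and
 * writing it needs no masking or shifting.
 */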
static unsigned int get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

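/* The physical package ID is the initial APIC ID shifted down by index_msb. */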
static unsigned int phys_pkg_id(int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}

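/* x2apic provides a dedicated SELF IPI register, so a single write suffices. */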
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

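/* Cache this CPU's logical APIC ID (LDR) for use as an IPI destination. */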
static void init_x2apic_ldr(void)
{
	int cpu = smp_processor_id();

	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
	return;
}

struct genapic apic_x2apic_cluster = {
	.name = "cluster x2apic",
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.int_delivery_mode = dest_LowestPrio,
	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
	.target_cpus = x2apic_target_cpus,
	.vector_allocation_domain = x2apic_vector_allocation_domain,
	.apic_id_registered = x2apic_apic_id_registered,
	.init_apic_ldr = init_x2apic_ldr,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_self = x2apic_send_IPI_self,
	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
	.phys_pkg_id = phys_pkg_id,
	.get_apic_id = get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = (0xFFFFFFFFu),
};