• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Author: Huacai Chen <chenhuacai@loongson.cn>
4  * Copyright (C) 2020 Loongson Technology Corporation Limited
5  */
6 #ifndef __ASM_SMP_H
7 #define __ASM_SMP_H
8 
9 #include <linux/atomic.h>
10 #include <linux/bitops.h>
11 #include <linux/linkage.h>
12 #include <linux/smp.h>
13 #include <linux/threads.h>
14 #include <linux/cpumask.h>
15 
16 #ifdef CONFIG_SMP
17 
struct task_struct;

/*
 * Platform hooks for discovering, booting and controlling secondary CPUs.
 * A platform installs its implementation via register_smp_ops().
 */
struct plat_smp_ops {
	void (*send_ipi_single)(int cpu, unsigned int action);	/* raise IPI action bit(s) on one CPU */
	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);	/* raise IPI action bit(s) on a set of CPUs */
	void (*smp_setup)(void);	/* early SMP setup, called via plat_smp_setup() */
	void (*prepare_cpus)(unsigned int max_cpus);	/* prepare to boot up to @max_cpus secondaries */
	int (*boot_secondary)(int cpu, struct task_struct *idle);	/* start @cpu running with @idle as its idle task */
	void (*init_secondary)(void);	/* per-CPU init, run on the secondary itself */
	void (*smp_finish)(void);	/* final bring-up step on the secondary */
#ifdef CONFIG_HOTPLUG_CPU
	int (*cpu_disable)(void);	/* take the calling CPU offline, see __cpu_disable() */
	void (*cpu_die)(unsigned int cpu);	/* finish tearing down @cpu, see __cpu_die() */
#endif
};

/* Currently registered platform SMP operations. */
extern struct plat_smp_ops *mp_ops;
void register_smp_ops(const struct plat_smp_ops *ops);
36 
plat_smp_setup(void)37 static inline void plat_smp_setup(void)
38 {
39 	mp_ops->smp_setup();
40 }
41 
42 #else /* !CONFIG_SMP */
43 
44 struct plat_smp_ops;
45 
/* UP build: there are no secondary CPUs, so setup is a no-op. */
static inline void plat_smp_setup(void)
{
	/* UP, nothing to do ...  */
}
50 
/* UP build: platform SMP ops are never consulted, so registration is a no-op. */
static inline void register_smp_ops(const struct plat_smp_ops *ops)
{
	(void)ops;	/* intentionally unused */
}
54 
55 #endif /* !CONFIG_SMP */
56 
/* CPU accounting and topology maps (definitions live in the SMP core). */
extern int smp_num_siblings;		/* presumably SMT threads per core — confirm against definition */
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];	/* per-CPU sibling mask, built by set_cpu_sibling_map() */
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];	/* rebuilt by calculate_cpu_foreign_map() */
63 
/*
 * Fast accessor behind smp_processor_id(): the CPU number is cached in
 * thread_info.  The vDSO builds with __VDSO__ defined and must never use
 * this, so any such call is turned into a compile-time error.
 */
static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
	extern int vdso_smp_processor_id(void)
		__compiletime_error("VDSO should not call smp_processor_id()");
	return vdso_smp_processor_id();
#else
	return current_thread_info()->cpu;
#endif
}
/* Tell the generic <linux/smp.h> code that the arch provides its own. */
#define raw_smp_processor_id raw_smp_processor_id
75 
/*
 * Map from cpu id to sequential logical cpu number.  This will only
 * not be idempotent when cpus failed to come on-line.
 */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id.  */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]

/* A CPU's physical id is its entry in the logical map. */
#define cpu_physical_id(cpu)	cpu_logical_map(cpu)

/* IPI action bits passed to the plat_smp_ops send_ipi_* hooks. */
#define SMP_BOOT_CPU		0x1
#define SMP_RESCHEDULE		0x2
#define SMP_CALL_FUNCTION	0x4
90 
/*
 * Startup parameters (initial stack and thread_info) handed to a
 * secondary CPU via cpuboot_data.
 */
struct secondary_data {
	unsigned long stack;
	unsigned long thread_info;
};
extern struct secondary_data cpuboot_data;

/* Secondary-CPU entry points; asmlinkage suggests they are entered from asm. */
extern asmlinkage void smpboot_entry(void);
extern asmlinkage void start_secondary(void);

/* Topology map maintenance, see cpu_sibling_map/cpu_foreign_map above. */
extern void set_cpu_sibling_map(int cpu);
extern void clear_cpu_sibling_map(int cpu);
extern void calculate_cpu_foreign_map(void);
103 
/*
 * Generate IPI list text into @p; @prec is a formatting width
 * (NOTE(review): exact width semantics inferred from the name — confirm).
 */
extern void show_ipi_list(struct seq_file *p, int prec);
108 
109 /*
110  * this function sends a 'reschedule' IPI to another CPU.
111  * it goes straight through and wastes no time serializing
112  * anything. Worst case is that we lose a reschedule ...
113  */
smp_send_reschedule(int cpu)114 static inline void smp_send_reschedule(int cpu)
115 {
116 	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE);
117 }
118 
119 #ifdef CONFIG_HOTPLUG_CPU
__cpu_disable(void)120 static inline int __cpu_disable(void)
121 {
122 	return mp_ops->cpu_disable();
123 }
124 
__cpu_die(unsigned int cpu)125 static inline void __cpu_die(unsigned int cpu)
126 {
127 	mp_ops->cpu_die(cpu);
128 }
129 #endif
130 
arch_send_call_function_single_ipi(int cpu)131 static inline void arch_send_call_function_single_ipi(int cpu)
132 {
133 	mp_ops->send_ipi_single(cpu, SMP_CALL_FUNCTION);
134 }
135 
arch_send_call_function_ipi_mask(const struct cpumask * mask)136 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
137 {
138 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
139 }
140 
141 #endif /* __ASM_SMP_H */
142