• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
3  * Licensed under the Mulan PSL v2.
4  * You can use this software according to the terms and conditions of the Mulan PSL v2.
5  * You may obtain a copy of Mulan PSL v2 at:
6  *     http://license.coscl.org.cn/MulanPSL2
7  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
8  * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
9  * PURPOSE.
10  * See the Mulan PSL v2 for more details.
11  */
12 #include <irq/ipi.h>
13 #include <machine.h>
14 #include <common/kprint.h>
15 
/*
 * Per-CPU mailbox used for IPI-based message passing.
 * There is one slot per CPU (see the array below); CPU i's slot holds the
 * message most recently sent TO CPU i.
 */
struct ipi_data {
    /* Grab this lock before writing to this ipi_data */
    struct lock lock;

    /*
     * finish <- 1: the ipi_data (arguments) is handled.
     * volatile: polled by remote CPUs in wait_finish_ipi_tx() /
     * prepare_ipi_tx(), so reads must not be cached in a register.
     */
    volatile u16 finish;

    /* The IPI vector */
    u32 vector;

    /* Message payload words set via set_ipi_tx_arg() */
    unsigned long args[IPI_DATA_ARG_NUM];
};

/* IPI data shared among all the CPUs (index == target CPU id) */
static struct ipi_data ipi_data[PLAT_CPU_NUM];
31 
32 /* Invoked once during the kernel boot */
init_ipi_data(void)33 void init_ipi_data(void)
34 {
35     int i;
36 
37     for (i = 0; i < PLAT_CPU_NUM; ++i) {
38         lock_init(&ipi_data[i].lock);
39         ipi_data[i].finish = 1;
40     }
41 }
42 
43 /*
44  * Interfaces for inter-cpu communication (named IPI_transaction).
45  * IPI-based message sending.
46  */
47 
48 /* Lock the target buffer */
prepare_ipi_tx(u32 target_cpu)49 void prepare_ipi_tx(u32 target_cpu)
50 {
51     struct ipi_data *data_target, *data_self;
52 
53     data_target = &(ipi_data[target_cpu]);
54     data_self = &(ipi_data[smp_get_cpu_id()]);
55 
56     /*
57      * Handle IPI tx while waiting to avoid deadlock.
58      *
59      * Deadlock example:
60      * CPU 0: in prepare_ipi_tx(), waiting for ipi_data[1]->lock;
61      * CPU 1: in prepare_ipi_tx(), waiting for ipi_data[0]->lock;
62      * CPU 2: in wait_finish_ipi_tx(), holding ipi_data[0]->lock,
63      *        waiting for CPU 0 to finish an IPI tx;
64      * CPU 3: in wait_finish_ipi_tx(), holding ipi_data[1]->lock,
65      *        waiting for CPU 1 to finish an IPI tx.
66      */
67     while (try_lock(&data_target->lock)) {
68         if (!data_self->finish) {
69             handle_ipi();
70         }
71     }
72 }
73 
74 /* Set argments */
set_ipi_tx_arg(u32 target_cpu,u32 arg_index,unsigned long val)75 void set_ipi_tx_arg(u32 target_cpu, u32 arg_index, unsigned long val)
76 {
77     ipi_data[target_cpu].args[arg_index] = val;
78 }
79 
/*
 * Start IPI-based transaction (tx).
 *
 * ipi_vector can be encoded into the physical interrupt (as IRQ number),
 * which can be used to implement some fast-path (simple) communication.
 *
 * Nevertheless, we support sending information with IPI.
 * So, actually, we can use one ipi_vector to distinguish different IPIs.
 *
 * Caller must hold ipi_data[target_cpu].lock (via prepare_ipi_tx) and
 * have already stored any payload with set_ipi_tx_arg().
 */
void start_ipi_tx(u32 target_cpu, u32 ipi_vector)
{
    struct ipi_data *data;

    data = &(ipi_data[target_cpu]);

    /* Set ipi_vector */
    data->vector = ipi_vector;

    /*
     * Order the vector/args stores before the finish store below:
     * the receiver treats finish == 0 as "message ready".
     */
    smp_mb();

    /* Mark the arguments are ready (set_ipi_tx_arg before) */
    data->finish = 0;

    /* Make finish == 0 globally visible before the interrupt fires */
    smp_mb();

    /* Send physical IPI to interrupt the target CPU */
    arch_send_ipi(target_cpu, ipi_vector);
}
108 
109 /* Wait and unlock */
wait_finish_ipi_tx(u32 target_cpu)110 void wait_finish_ipi_tx(u32 target_cpu)
111 {
112     /*
113      * It is possible that core-A is waiting for core-B to finish one IPI
114      * while core-B is also waiting for core-A to finish one IPI.
115      * So, this function will polling on the IPI data of both the target
116      * core and the local core, namely data_target and data_self.
117      */
118     struct ipi_data *data_target, *data_self;
119 
120     data_target = &(ipi_data[target_cpu]);
121     data_self = &(ipi_data[smp_get_cpu_id()]);
122 
123     /* Avoid dead lock by checking and handling ipi request while waiting */
124     while (!data_target->finish) {
125         if (!data_self->finish) {
126             handle_ipi();
127         }
128     }
129 
130     unlock(&(data_target->lock));
131 }
132 
/*
 * Send IPI tx without argument: convenience wrapper that performs a
 * complete (synchronous) transaction — lock the target's buffer, fire
 * the IPI, and wait for the target to mark it handled.
 */
void send_ipi(u32 target_cpu, u32 ipi_vector)
{
    prepare_ipi_tx(target_cpu);
    start_ipi_tx(target_cpu, ipi_vector);
    wait_finish_ipi_tx(target_cpu);
}
140 
/* Receiver side interfaces */

/* The current CPU's own mailbox slot (messages sent TO this CPU) */
#define ipi_data_self (&ipi_data[smp_get_cpu_id()])
144 
145 /* Get argments */
get_ipi_tx_arg(u32 arg_index)146 unsigned long get_ipi_tx_arg(u32 arg_index)
147 {
148     return ipi_data_self->args[arg_index];
149 }
150 
151 /* Handle IPI tx */
handle_ipi(void)152 void handle_ipi(void)
153 {
154     struct ipi_data *data = ipi_data_self;
155 
156     /* The IPI tx may have been completed in wait_finish_ipi_tx() */
157     if (data->finish) {
158         return;
159     }
160 
161     arch_handle_ipi(data->vector);
162 
163     data->finish = 1;
164 }
165