// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::BTreeMap;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use acpi_tables::aml;
use acpi_tables::aml::Aml;
use anyhow::bail;
use anyhow::Context;
use base::custom_serde::serialize_arc_mutex;
use base::error;
use base::warn;
use base::Error as SysError;
use base::Event;
use base::EventToken;
use base::EventWaitResult;
use base::SendTube;
use base::Tube;
use base::VmEventType;
use base::WaitContext;
use base::WorkerThread;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use sync::Mutex;
use thiserror::Error;
use vm_control::GpeNotify;
use vm_control::PmResource;
use vm_control::PmeNotify;
use vm_control::VmRequest;
use vm_control::VmResponse;

use crate::ac_adapter::AcAdapter;
use crate::pci::pm::PmConfig;
use crate::pci::CrosvmDeviceId;
use crate::BusAccessInfo;
use crate::BusDevice;
use crate::BusResumeDevice;
use crate::DeviceId;
use crate::IrqLevelEvent;
use crate::Suspendable;

#[derive(Error, Debug)]
pub enum ACPIPMError {
    /// Creating WaitContext failed.
    #[error("failed to create wait context: {0}")]
    CreateWaitContext(SysError),
    /// Error while waiting for events.
    #[error("failed to wait for events: {0}")]
    WaitError(SysError),
    #[error("Did not find group_id corresponding to acpi_mc_group")]
    AcpiMcGroupError,
    #[error("Failed to create and bind NETLINK_GENERIC socket for acpi_mc_group: {0}")]
    AcpiEventSockError(base::Error),
59 #[error("GPE {0} is out of bound")]
60 GpeOutOfBound(u32),
61 }
62
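/// Fixed-feature ACPI events reported through the PM1 status and enable registers.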
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum ACPIPMFixedEvent {
    GlobalLock,
    PowerButton,
    SleepButton,
    RTC,
}

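/// PM1 fixed-hardware register state (status, enable, and control), shared with the ACPI worker
/// thread.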
#[derive(Serialize)]
pub(crate) struct Pm1Resource {
    pub(crate) status: u16,
    enable: u16,
    control: u16,
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
    #[serde(skip_serializing)]
    rtc_clear_evt: Option<Event>,
}

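/// Snapshot form of `Pm1Resource`, holding only the register values.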
#[derive(Deserialize)]
struct Pm1ResourceSerializable {
    status: u16,
    enable: u16,
    control: u16,
}

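/// GPE0 register state plus the devices to notify when a given GPE is triggered.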
#[derive(Serialize)]
pub(crate) struct GpeResource {
    pub(crate) status: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    enable: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    #[serde(skip_serializing)]
    pub(crate) gpe_notify: BTreeMap<u32, Vec<Arc<Mutex<dyn GpeNotify>>>>,
    // For each triggered GPE, a vector of events to check when resampling
    // sci_evt. If any events are un-signaled, then sci_evt should be re-asserted.
    #[serde(skip_serializing)]
    pending_clear_evts: BTreeMap<u32, Vec<Event>>,
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
}

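/// Snapshot form of `GpeResource`, holding only the status and enable registers.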
#[derive(Deserialize)]
struct GpeResourceSerializable {
    status: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
    enable: [u8; ACPIPM_RESOURCE_GPE0_BLK_LEN as usize / 2],
}

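/// Per-bus lists of root ports to notify when a PME (PCI power management event) is raised.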
#[derive(Serialize, Deserialize, Clone)]
pub(crate) struct PciResource {
    #[serde(skip_serializing, skip_deserializing)]
    pub(crate) pme_notify: BTreeMap<u8, Vec<Arc<Mutex<dyn PmeNotify>>>>,
}

/// ACPI PM resource for handling OS suspend/resume requests
#[allow(dead_code)]
#[derive(Serialize)]
pub struct ACPIPMResource {
    // This is the SCI interrupt that will be raised in the VM.
    #[serde(skip_serializing)]
    sci_evt: IrqLevelEvent,
    #[serde(skip_serializing)]
    worker_thread: Option<WorkerThread<()>>,
    #[serde(skip_serializing)]
    suspend_tube: Arc<Mutex<SendTube>>,
    #[serde(skip_serializing)]
    exit_evt_wrtube: SendTube,
    #[serde(serialize_with = "serialize_arc_mutex")]
    pm1: Arc<Mutex<Pm1Resource>>,
    #[serde(serialize_with = "serialize_arc_mutex")]
    gpe0: Arc<Mutex<GpeResource>>,
    #[serde(serialize_with = "serialize_arc_mutex")]
    pci: Arc<Mutex<PciResource>>,
    #[serde(skip_serializing)]
    acdc: Option<Arc<Mutex<AcAdapter>>>,
}

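/// Subset of `ACPIPMResource` state captured by snapshot and applied on restore.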
#[derive(Deserialize)]
struct ACPIPMResourceSerializable {
    pm1: Pm1ResourceSerializable,
    gpe0: GpeResourceSerializable,
}

impl ACPIPMResource {
    /// Constructs the ACPI Power Management Resource.
    #[allow(dead_code)]
    pub fn new(
        sci_evt: IrqLevelEvent,
        suspend_tube: Arc<Mutex<SendTube>>,
        exit_evt_wrtube: SendTube,
        acdc: Option<Arc<Mutex<AcAdapter>>>,
    ) -> ACPIPMResource {
        let pm1 = Pm1Resource {
            status: 0,
            enable: 0,
            control: 0,
            suspend_tube: suspend_tube.clone(),
            rtc_clear_evt: None,
        };
        let gpe0 = GpeResource {
            status: Default::default(),
            enable: Default::default(),
            gpe_notify: BTreeMap::new(),
            pending_clear_evts: BTreeMap::new(),
            suspend_tube: suspend_tube.clone(),
        };
        let pci = PciResource {
            pme_notify: BTreeMap::new(),
        };

        ACPIPMResource {
            sci_evt,
            worker_thread: None,
            suspend_tube,
            exit_evt_wrtube,
            pm1: Arc::new(Mutex::new(pm1)),
            gpe0: Arc::new(Mutex::new(gpe0)),
            pci: Arc::new(Mutex::new(pci)),
            acdc,
        }
    }

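    /// Starts the background worker that forwards host ACPI events and re-samples the SCI
    /// interrupt.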
start(&mut self)183 pub fn start(&mut self) {
184 let sci_evt = self.sci_evt.try_clone().expect("failed to clone event");
185 let pm1 = self.pm1.clone();
186 let gpe0 = self.gpe0.clone();
187 let acdc = self.acdc.clone();
188
189 let acpi_event_ignored_gpe = Vec::new();
190
191 self.worker_thread = Some(WorkerThread::start("ACPI PM worker", move |kill_evt| {
192 if let Err(e) = run_worker(sci_evt, kill_evt, pm1, gpe0, acpi_event_ignored_gpe, acdc) {
193 error!("{}", e);
194 }
195 }));
196 }
197 }
198
199 impl Suspendable for ACPIPMResource {
snapshot(&mut self) -> anyhow::Result<AnySnapshot>200 fn snapshot(&mut self) -> anyhow::Result<AnySnapshot> {
201 if !self.gpe0.lock().pending_clear_evts.is_empty() {
202 bail!("ACPIPMResource is busy");
203 }
204 AnySnapshot::to_any(&self)
205 .with_context(|| format!("error serializing {}", self.debug_label()))
206 }
207
restore(&mut self, data: AnySnapshot) -> anyhow::Result<()>208 fn restore(&mut self, data: AnySnapshot) -> anyhow::Result<()> {
        let acpi_snapshot: ACPIPMResourceSerializable = AnySnapshot::from_any(data)
            .with_context(|| format!("error deserializing {}", self.debug_label()))?;
        {
            let mut pm1 = self.pm1.lock();
            pm1.status = acpi_snapshot.pm1.status;
            pm1.enable = acpi_snapshot.pm1.enable;
            pm1.control = acpi_snapshot.pm1.control;
        }
        {
            let mut gpe0 = self.gpe0.lock();
            gpe0.status = acpi_snapshot.gpe0.status;
            gpe0.enable = acpi_snapshot.gpe0.enable;
        }
        Ok(())
    }

    fn sleep(&mut self) -> anyhow::Result<()> {
        if let Some(worker_thread) = self.worker_thread.take() {
            worker_thread.stop();
        }
        Ok(())
    }

    fn wake(&mut self) -> anyhow::Result<()> {
        self.start();
        Ok(())
    }
}

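/// Worker loop: forwards host ACPI events (when an event socket is available), re-asserts SCI on
/// resample while PM1 or GPE status bits remain set, and exits when `kill_evt` is signaled.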
fn run_worker(
    sci_evt: IrqLevelEvent,
    kill_evt: Event,
    pm1: Arc<Mutex<Pm1Resource>>,
    gpe0: Arc<Mutex<GpeResource>>,
    acpi_event_ignored_gpe: Vec<u32>,
    arced_ac_adapter: Option<Arc<Mutex<AcAdapter>>>,
) -> Result<(), ACPIPMError> {
    let acpi_event_sock = crate::sys::get_acpi_event_sock()?;
    #[derive(EventToken)]
    enum Token {
        AcpiEvent,
        InterruptResample,
        Kill,
    }

    let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
        (sci_evt.get_resample(), Token::InterruptResample),
        (&kill_evt, Token::Kill),
    ])
    .map_err(ACPIPMError::CreateWaitContext)?;
    if let Some(acpi_event_sock) = &acpi_event_sock {
        wait_ctx
            .add(acpi_event_sock, Token::AcpiEvent)
            .map_err(ACPIPMError::CreateWaitContext)?;
    }

    loop {
        let events = wait_ctx.wait().map_err(ACPIPMError::WaitError)?;
        for event in events.iter().filter(|e| e.is_readable) {
            match event.token {
                Token::AcpiEvent => {
                    crate::sys::acpi_event_run(
                        &sci_evt,
                        &acpi_event_sock,
                        &gpe0,
                        &acpi_event_ignored_gpe,
                        &arced_ac_adapter,
                    );
                }
                Token::InterruptResample => {
                    sci_evt.clear_resample();

                    // Re-trigger SCI if PM1 or GPE status is still not cleared.
                    pm1.lock().resample_clear_evts_and_trigger(&sci_evt);
                    gpe0.lock().resample_clear_evts_and_trigger(&sci_evt);
                }
                Token::Kill => return Ok(()),
            }
        }
    }
}

impl Pm1Resource {
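    /// Asserts SCI and sends a wake event if any enabled PM1 fixed event has its status bit set.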
trigger_sci(&self, sci_evt: &IrqLevelEvent)292 fn trigger_sci(&self, sci_evt: &IrqLevelEvent) {
293 if self.status & self.enable & ACPIPMFixedEvent::bitmask_all() != 0 {
294 if let Err(e) = sci_evt.trigger() {
295 error!("ACPIPM: failed to trigger sci event for pm1: {}", e);
296 }
297 if let Err(e) = self.suspend_tube.lock().send(&false) {
298 error!("ACPIPM: failed to trigger wake event: {}", e);
299 }
300 }
301 }
302
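    /// Re-latches the RTC status bit if its clear event has not been signaled yet, then re-asserts
    /// SCI if needed.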
    fn resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent) {
        if let Some(clear_evt) = self.rtc_clear_evt.take() {
            if clear_evt.wait_timeout(Duration::ZERO) == Ok(EventWaitResult::TimedOut) {
                self.rtc_clear_evt = Some(clear_evt);
                self.status |= ACPIPMFixedEvent::RTC.bitmask();
            }
        }
        self.trigger_sci(sci_evt);
    }
}

impl GpeResource {
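    /// Asserts SCI and sends a wake event if any enabled GPE has its status bit set.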
trigger_sci(&self, sci_evt: &IrqLevelEvent)315 pub fn trigger_sci(&self, sci_evt: &IrqLevelEvent) {
316 if (0..self.status.len()).any(|i| self.status[i] & self.enable[i] != 0) {
317 if let Err(e) = sci_evt.trigger() {
318 error!("ACPIPM: failed to trigger sci event for gpe: {}", e);
319 }
320 if let Err(e) = self.suspend_tube.lock().send(&false) {
321 error!("ACPIPM: failed to trigger wake event: {}", e);
322 }
323 }
324 }
325
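    /// Sets the status bit for the given GPE number, or returns an error if it is beyond the GPE0
    /// block.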
set_active(&mut self, gpe: u32) -> Result<(), ACPIPMError>326 pub fn set_active(&mut self, gpe: u32) -> Result<(), ACPIPMError> {
327 if let Some(status_byte) = self.status.get_mut(gpe as usize / 8) {
328 *status_byte |= 1 << (gpe % 8);
329 } else {
330 return Err(ACPIPMError::GpeOutOfBound(gpe));
331 }
332 Ok(())
333 }
334
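    /// Drops clear events that have been signaled, re-activates GPEs that still have pending
    /// clear events, and re-asserts SCI if any enabled GPE remains set.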
    pub fn resample_clear_evts_and_trigger(&mut self, sci_evt: &IrqLevelEvent) {
        let mut retained = Vec::new();
        self.pending_clear_evts.retain(|gpe, clear_evts| {
            clear_evts.retain(|clear_evt| {
                clear_evt.wait_timeout(Duration::ZERO) == Ok(EventWaitResult::TimedOut)
            });
            if !clear_evts.is_empty() {
                retained.push(*gpe);
            }
            !clear_evts.is_empty()
        });
        for gpe in retained.into_iter() {
            self.set_active(gpe).expect("bad gpe index");
        }

        self.trigger_sci(sci_evt);
    }
}

/// The ACPI PM register lengths.
pub const ACPIPM_RESOURCE_EVENTBLK_LEN: u8 = 4;
pub const ACPIPM_RESOURCE_CONTROLBLK_LEN: u8 = 2;
pub const ACPIPM_RESOURCE_GPE0_BLK_LEN: u8 = 64;
pub const ACPIPM_RESOURCE_LEN: u8 = ACPIPM_RESOURCE_EVENTBLK_LEN + 4 + ACPIPM_RESOURCE_GPE0_BLK_LEN;

// Should be in sync with gpe_allocator range
pub const ACPIPM_GPE_MAX: u16 = ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2 * 8 - 1;

// ACPI PM register value definitions

/// Section 4.8.4.1.1 PM1 Status Registers, ACPI Spec Version 6.4
/// Register Location: <PM1a_EVT_BLK / PM1b_EVT_BLK> System I/O or Memory Space (defined in FADT)
/// Size: PM1_EVT_LEN / 2 (defined in FADT)
const PM1_STATUS: u16 = 0;

/// Section 4.8.4.1.2 PM1 Enable Registers, ACPI Spec Version 6.4
/// Register Location: <<PM1a_EVT_BLK / PM1b_EVT_BLK> + PM1_EVT_LEN / 2 System I/O or Memory Space
/// (defined in FADT)
/// Size: PM1_EVT_LEN / 2 (defined in FADT)
const PM1_ENABLE: u16 = PM1_STATUS + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2);

/// Section 4.8.4.2.1 PM1 Control Registers, ACPI Spec Version 6.4
/// Register Location: <PM1a_CNT_BLK / PM1b_CNT_BLK> System I/O or Memory Space (defined in FADT)
/// Size: PM1_CNT_LEN (defined in FADT)
const PM1_CONTROL: u16 = PM1_STATUS + ACPIPM_RESOURCE_EVENTBLK_LEN as u16;

/// Section 4.8.5.1 General-Purpose Event Register Blocks, ACPI Spec Version 6.4
/// - Each register block contains two registers: an enable and a status register.
/// - Each register block is 32-bit aligned.
/// - Each register in the block is accessed as a byte.
///
/// Section 4.8.5.1.1 General-Purpose Event 0 Register Block, ACPI Spec Version 6.4
/// This register block consists of two registers: The GPE0_STS and the GPE0_EN registers. Each
/// register’s length is defined to be half the length of the GPE0 register block, and is described
/// in the ACPI FADT’s GPE0_BLK and GPE0_BLK_LEN operators.
///
/// Section 4.8.5.1.1.1 General-Purpose Event 0 Status Register, ACPI Spec Version 6.4
/// Register Location: <GPE0_STS> System I/O or System Memory Space (defined in FADT)
/// Size: GPE0_BLK_LEN/2 (defined in FADT)
const GPE0_STATUS: u16 = PM1_STATUS + ACPIPM_RESOURCE_EVENTBLK_LEN as u16 + 4; // ensure alignment

/// Section 4.8.5.1.1.2 General-Purpose Event 0 Enable Register, ACPI Spec Version 6.4
/// Register Location: <GPE0_EN> System I/O or System Memory Space (defined in FADT)
/// Size: GPE0_BLK_LEN/2 (defined in FADT)
const GPE0_ENABLE: u16 = GPE0_STATUS + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2);

/// Section 4.8.4.1.1, 4.8.4.1.2 Fixed event bits in both PM1 Status and PM1 Enable registers.
const BITSHIFT_PM1_GBL: u16 = 5;
const BITSHIFT_PM1_PWRBTN: u16 = 8;
const BITSHIFT_PM1_SLPBTN: u16 = 9;
const BITSHIFT_PM1_RTC: u16 = 10;

const BITMASK_PM1CNT_SLEEP_ENABLE: u16 = 0x2000;
const BITMASK_PM1CNT_WAKE_STATUS: u16 = 0x8000;

const BITMASK_PM1CNT_SLEEP_TYPE: u16 = 0x1C00;
const SLEEP_TYPE_S1: u16 = 1 << 10;
const SLEEP_TYPE_S5: u16 = 0 << 10;

impl ACPIPMFixedEvent {
    fn bitshift(self) -> u16 {
        match self {
            ACPIPMFixedEvent::GlobalLock => BITSHIFT_PM1_GBL,
            ACPIPMFixedEvent::PowerButton => BITSHIFT_PM1_PWRBTN,
            ACPIPMFixedEvent::SleepButton => BITSHIFT_PM1_SLPBTN,
            ACPIPMFixedEvent::RTC => BITSHIFT_PM1_RTC,
        }
    }

    pub(crate) fn bitmask(self) -> u16 {
        1 << self.bitshift()
    }

    fn bitmask_all() -> u16 {
        (1 << BITSHIFT_PM1_GBL)
            | (1 << BITSHIFT_PM1_PWRBTN)
            | (1 << BITSHIFT_PM1_SLPBTN)
            | (1 << BITSHIFT_PM1_RTC)
    }
}

impl FromStr for ACPIPMFixedEvent {
    type Err = &'static str;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "gbllock" => Ok(ACPIPMFixedEvent::GlobalLock),
            "powerbtn" => Ok(ACPIPMFixedEvent::PowerButton),
            "sleepbtn" => Ok(ACPIPMFixedEvent::SleepButton),
            "rtc" => Ok(ACPIPMFixedEvent::RTC),
            _ => Err("unknown event, must be: gbllock|powerbtn|sleepbtn|rtc"),
        }
    }
}

impl PmResource for ACPIPMResource {
    fn pwrbtn_evt(&mut self) {
        let mut pm1 = self.pm1.lock();

        pm1.status |= ACPIPMFixedEvent::PowerButton.bitmask();
        pm1.trigger_sci(&self.sci_evt);
    }

    fn slpbtn_evt(&mut self) {
        let mut pm1 = self.pm1.lock();

        pm1.status |= ACPIPMFixedEvent::SleepButton.bitmask();
        pm1.trigger_sci(&self.sci_evt);
    }

    fn rtc_evt(&mut self, clear_evt: Event) {
        let mut pm1 = self.pm1.lock();

        pm1.rtc_clear_evt = Some(clear_evt);
        pm1.status |= ACPIPMFixedEvent::RTC.bitmask();
        pm1.trigger_sci(&self.sci_evt);
    }

    fn gpe_evt(&mut self, gpe: u32, clear_evt: Option<Event>) {
        let mut gpe0 = self.gpe0.lock();
        match gpe0.set_active(gpe) {
            Ok(_) => {
                if let Some(clear_evt) = clear_evt {
                    gpe0.pending_clear_evts
                        .entry(gpe)
                        .or_default()
                        .push(clear_evt);
                }
                gpe0.trigger_sci(&self.sci_evt)
            }
            Err(e) => error!("{}", e),
        }
    }

    fn pme_evt(&mut self, requester_id: u16) {
        let bus = ((requester_id >> 8) & 0xFF) as u8;
        let mut pci = self.pci.lock();
        if let Some(root_ports) = pci.pme_notify.get_mut(&bus) {
            for root_port in root_ports {
                root_port.lock().notify(requester_id);
            }
        }
    }

    fn register_gpe_notify_dev(&mut self, gpe: u32, notify_dev: Arc<Mutex<dyn GpeNotify>>) {
        let mut gpe0 = self.gpe0.lock();
        match gpe0.gpe_notify.get_mut(&gpe) {
            Some(v) => v.push(notify_dev),
            None => {
                gpe0.gpe_notify.insert(gpe, vec![notify_dev]);
            }
        }
    }

    fn register_pme_notify_dev(&mut self, bus: u8, notify_dev: Arc<Mutex<dyn PmeNotify>>) {
        let mut pci = self.pci.lock();
        match pci.pme_notify.get_mut(&bus) {
            Some(v) => v.push(notify_dev),
            None => {
                pci.pme_notify.insert(bus, vec![notify_dev]);
            }
        }
    }
}

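// Inclusive end offsets of each register block, used for range matching in read()/write() below.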
const PM1_STATUS_LAST: u16 = PM1_STATUS + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2) - 1;
const PM1_ENABLE_LAST: u16 = PM1_ENABLE + (ACPIPM_RESOURCE_EVENTBLK_LEN as u16 / 2) - 1;
const PM1_CONTROL_LAST: u16 = PM1_CONTROL + ACPIPM_RESOURCE_CONTROLBLK_LEN as u16 - 1;
const GPE0_STATUS_LAST: u16 = GPE0_STATUS + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2) - 1;
const GPE0_ENABLE_LAST: u16 = GPE0_ENABLE + (ACPIPM_RESOURCE_GPE0_BLK_LEN as u16 / 2) - 1;

impl BusDevice for ACPIPMResource {
    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::ACPIPMResource.into()
    }

    fn debug_label(&self) -> String {
        "ACPIPMResource".to_owned()
    }

    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        match info.offset as u16 {
            // Accesses to the PM1 registers are done through byte or word accesses
            PM1_STATUS..=PM1_STATUS_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_STATUS as u64) as usize;

                let v = self.pm1.lock().status.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    data[i] = v[j];
                }
            }
            PM1_ENABLE..=PM1_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_ENABLE as u64) as usize;

                let v = self.pm1.lock().enable.to_ne_bytes();
                for (i, j) in (offset..offset + data.len()).enumerate() {
                    data[i] = v[j];
                }
            }
            PM1_CONTROL..=PM1_CONTROL_LAST => {
                if data.len() > std::mem::size_of::<u16>()
                    || info.offset + data.len() as u64 > (PM1_CONTROL_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - PM1_CONTROL as u64) as usize;
                data.copy_from_slice(
                    &self.pm1.lock().control.to_ne_bytes()[offset..offset + data.len()],
                );
            }
            // OSPM accesses GPE registers through byte accesses (regardless of their length)
            GPE0_STATUS..=GPE0_STATUS_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_STATUS_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_STATUS as u64) as usize;
                data[0] = self.gpe0.lock().status[offset];
            }
            GPE0_ENABLE..=GPE0_ENABLE_LAST => {
                if data.len() > std::mem::size_of::<u8>()
                    || info.offset + data.len() as u64 > (GPE0_ENABLE_LAST + 1).into()
                {
                    warn!("ACPIPM: bad read size: {}", data.len());
                    return;
                }
                let offset = (info.offset - GPE0_ENABLE as u64) as usize;
                data[0] = self.gpe0.lock().enable[offset];
            }
            _ => {
                warn!("ACPIPM: Bad read from {}", info);
            }
        }
    }

write(&mut self, info: BusAccessInfo, data: &[u8])605 fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
606 match info.offset as u16 {
607 // Accesses to the PM1 registers are done through byte or word accesses
608 PM1_STATUS..=PM1_STATUS_LAST => {
609 if data.len() > std::mem::size_of::<u16>()
610 || info.offset + data.len() as u64 > (PM1_STATUS_LAST + 1).into()
611 {
612 warn!("ACPIPM: bad write size: {}", data.len());
613 return;
614 }
615 let offset = (info.offset - PM1_STATUS as u64) as usize;
616
617 let mut pm1 = self.pm1.lock();
618 let mut v = pm1.status.to_ne_bytes();
619 for (i, j) in (offset..offset + data.len()).enumerate() {
620 v[j] &= !data[i];
621 }
622 pm1.status = u16::from_ne_bytes(v);
623 }
624 PM1_ENABLE..=PM1_ENABLE_LAST => {
625 if data.len() > std::mem::size_of::<u16>()
626 || info.offset + data.len() as u64 > (PM1_ENABLE_LAST + 1).into()
627 {
628 warn!("ACPIPM: bad write size: {}", data.len());
629 return;
630 }
631 let offset = (info.offset - PM1_ENABLE as u64) as usize;
632
633 let mut pm1 = self.pm1.lock();
634 let mut v = pm1.enable.to_ne_bytes();
635 for (i, j) in (offset..offset + data.len()).enumerate() {
636 v[j] = data[i];
637 }
638 pm1.enable = u16::from_ne_bytes(v);
639 pm1.resample_clear_evts_and_trigger(&self.sci_evt);
640 }
641 PM1_CONTROL..=PM1_CONTROL_LAST => {
642 if data.len() > std::mem::size_of::<u16>()
643 || info.offset + data.len() as u64 > (PM1_CONTROL_LAST + 1).into()
644 {
645 warn!("ACPIPM: bad write size: {}", data.len());
646 return;
647 }
648 let offset = (info.offset - PM1_CONTROL as u64) as usize;
649
650 let mut pm1 = self.pm1.lock();
651
652 let mut v = pm1.control.to_ne_bytes();
653 for (i, j) in (offset..offset + data.len()).enumerate() {
654 v[j] = data[i];
655 }
656 let val = u16::from_ne_bytes(v);
657
658 // SLP_EN is a write-only bit and reads to it always return a zero
659 if (val & BITMASK_PM1CNT_SLEEP_ENABLE) != 0 {
660 match val & BITMASK_PM1CNT_SLEEP_TYPE {
661 SLEEP_TYPE_S1 => {
662 if let Err(e) = self.suspend_tube.lock().send(&true) {
663 error!("ACPIPM: failed to trigger suspend event: {}", e);
664 }
665 }
666 SLEEP_TYPE_S5 => {
667 if let Err(e) =
668 self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit)
669 {
670 error!("ACPIPM: failed to trigger exit event: {}", e);
671 }
672 }
673 _ => error!(
674 "ACPIPM: unknown SLP_TYP written: {}",
675 (val & BITMASK_PM1CNT_SLEEP_TYPE) >> 10
676 ),
677 }
678 }
679 pm1.control = val & !BITMASK_PM1CNT_SLEEP_ENABLE;
680
681 // Re-trigger PM & GPEs in case there is a pending wakeup that should
682 // override us just having gone to sleep.
683 pm1.resample_clear_evts_and_trigger(&self.sci_evt);
684 self.gpe0
685 .lock()
686 .resample_clear_evts_and_trigger(&self.sci_evt);
687 }
688 // OSPM accesses GPE registers through byte accesses (regardless of their length)
689 GPE0_STATUS..=GPE0_STATUS_LAST => {
690 if data.len() > std::mem::size_of::<u8>()
691 || info.offset + data.len() as u64 > (GPE0_STATUS_LAST + 1).into()
692 {
693 warn!("ACPIPM: bad write size: {}", data.len());
694 return;
695 }
696 let offset = (info.offset - GPE0_STATUS as u64) as usize;
697 self.gpe0.lock().status[offset] &= !data[0];
698 }
699 GPE0_ENABLE..=GPE0_ENABLE_LAST => {
700 if data.len() > std::mem::size_of::<u8>()
701 || info.offset + data.len() as u64 > (GPE0_ENABLE_LAST + 1).into()
702 {
703 warn!("ACPIPM: bad write size: {}", data.len());
704 return;
705 }
706 let offset = (info.offset - GPE0_ENABLE as u64) as usize;
707 let mut gpe = self.gpe0.lock();
708 if gpe.enable[offset] != data[0] {
709 gpe.enable[offset] = data[0];
710 gpe.resample_clear_evts_and_trigger(&self.sci_evt);
711 }
712 }
713 _ => {
714 warn!("ACPIPM: Bad write to {}", info);
715 }
716 };
717 }
718 }
719
impl BusResumeDevice for ACPIPMResource {
    fn resume_imminent(&mut self) {
        self.pm1.lock().status |= BITMASK_PM1CNT_WAKE_STATUS;
    }
}

impl Aml for ACPIPMResource {
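    /// Emits the `_S1_` and `_S5_` sleep state package objects.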
to_aml_bytes(&self, bytes: &mut Vec<u8>)727 fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
728 // S1
729 aml::Name::new(
730 "_S1_".into(),
731 &aml::Package::new(vec![&aml::ONE, &aml::ONE, &aml::ZERO, &aml::ZERO]),
732 )
733 .to_aml_bytes(bytes);
734
735 // S5
736 aml::Name::new(
737 "_S5_".into(),
738 &aml::Package::new(vec![&aml::ZERO, &aml::ZERO, &aml::ZERO, &aml::ZERO]),
739 )
740 .to_aml_bytes(bytes);
741 }
742 }
743
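/// GPE number raised for PME-based device wakeups.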
pub const PM_WAKEUP_GPIO: u32 = 0;

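/// Raises a wakeup GPE on behalf of a PCI device, gated on its power-management configuration.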
pub struct PmWakeupEvent {
    vm_control_tube: Arc<Mutex<Tube>>,
    pm_config: Arc<Mutex<PmConfig>>,
}

impl PmWakeupEvent {
    pub fn new(vm_control_tube: Arc<Mutex<Tube>>, pm_config: Arc<Mutex<PmConfig>>) -> Self {
        Self {
            vm_control_tube,
            pm_config,
        }
    }

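    /// If the device's PM state calls for a PME, raises `PM_WAKEUP_GPIO` via the VM control tube
    /// and returns the associated clear event.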
trigger_wakeup(&self) -> anyhow::Result<Option<Event>>759 pub fn trigger_wakeup(&self) -> anyhow::Result<Option<Event>> {
760 if self.pm_config.lock().should_trigger_pme() {
761 let event = Event::new().context("failed to create clear event")?;
762 let tube = self.vm_control_tube.lock();
763 tube.send(&VmRequest::Gpe {
764 gpe: PM_WAKEUP_GPIO,
765 clear_evt: Some(event.try_clone().context("failed to clone clear event")?),
766 })
767 .context("failed to send pme")?;
768 match tube.recv::<VmResponse>() {
769 Ok(VmResponse::Ok) => Ok(Some(event)),
770 e => bail!("pme failure {:?}", e),
771 }
772 } else {
773 Ok(None)
774 }
775 }
776 }
777
778 #[cfg(test)]
779 mod tests {
780 use base::Tube;
781
782 use super::*;
783 use crate::suspendable_tests;
784
get_send_tube() -> SendTube785 fn get_send_tube() -> SendTube {
786 Tube::directional_pair().unwrap().0
787 }
788
get_irq_evt() -> IrqLevelEvent789 fn get_irq_evt() -> IrqLevelEvent {
790 match crate::IrqLevelEvent::new() {
791 Ok(evt) => evt,
792 Err(e) => panic!(
793 "failed to create irqlevelevt: {} - panic. Can't test ACPI",
794 e
795 ),
796 }
797 }
798
modify_device(acpi: &mut ACPIPMResource)799 fn modify_device(acpi: &mut ACPIPMResource) {
800 {
801 let mut pm1 = acpi.pm1.lock();
802 pm1.enable += 1;
803 }
804 }
805
806 suspendable_tests!(
807 acpi,
808 ACPIPMResource::new(
809 get_irq_evt(),
810 Arc::new(Mutex::new(get_send_tube())),
811 get_send_tube(),
812 None,
813 ),
814 modify_device
815 );
816 }
817