// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include "pw_spi_mcuxpresso/responder.h"

#include <cinttypes>

#include "pw_assert/check.h"
#include "pw_log/log.h"
#include "pw_status/try.h"

// Vendor terminology requires this to be disabled.
// inclusive-language: disable

namespace pw::spi {
namespace {

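// The MCUXpresso SDK APIs take uint8_t* buffers, while pw::spi uses
// std::byte spans; these helpers bridge the two representations.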
uint8_t* SpanData(ByteSpan& span) {
  static_assert(std::is_same_v<uint8_t, unsigned char>);
  return reinterpret_cast<uint8_t*>(span.data());
}

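// The SDK's spi_transfer_t::txData is a non-const uint8_t*, even for
// transmit-only transfers, so the const qualifier must be cast away.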
uint8_t* SpanDataDiscardConst(ConstByteSpan& span) {
  static_assert(std::is_same_v<uint8_t, unsigned char>);
  return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(span.data()));
}

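// Converts an MCUXpresso SDK status_t into the closest pw::Status.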
Status ToPwStatus(status_t status) {
  switch (status) {
    // Intentional fall-through
    case kStatus_Success:
    case kStatus_SPI_Idle:
      return OkStatus();
    case kStatus_ReadOnly:
      return Status::PermissionDenied();
    case kStatus_OutOfRange:
      return Status::OutOfRange();
    case kStatus_InvalidArgument:
      return Status::InvalidArgument();
    case kStatus_Timeout:
      return Status::DeadlineExceeded();
    case kStatus_NoTransferInProgress:
      return Status::FailedPrecondition();
    // Intentional fall-through
    case kStatus_Fail:
    default:
      PW_LOG_ERROR("Mcuxpresso SPI unknown error code: %d",
                   static_cast<int>(status));
      return Status::Unknown();
  }
}

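// Translates the portable pw::spi configuration into the SDK's
// spi_slave_config_t.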
Status SetSdkConfig(const McuxpressoResponder::Config& config,
                    spi_slave_config_t& sdk_config) {
  switch (config.polarity) {
    case ClockPolarity::kActiveLow:
      sdk_config.polarity = kSPI_ClockPolarityActiveLow;
      break;
    case ClockPolarity::kActiveHigh:
      sdk_config.polarity = kSPI_ClockPolarityActiveHigh;
      break;
    default:
      return Status::InvalidArgument();
  }

  switch (config.phase) {
    case ClockPhase::kRisingEdge:
      sdk_config.phase = kSPI_ClockPhaseFirstEdge;
      break;
    case ClockPhase::kFallingEdge:
      sdk_config.phase = kSPI_ClockPhaseSecondEdge;
      break;
    default:
      return Status::InvalidArgument();
  }

  switch (config.bit_order) {
    case BitOrder::kMsbFirst:
      sdk_config.direction = kSPI_MsbFirst;
      break;
    case BitOrder::kLsbFirst:
      sdk_config.direction = kSPI_LsbFirst;
      break;
    default:
      return Status::InvalidArgument();
  }

  switch (config.bits_per_word()) {
    case 4:
      sdk_config.dataWidth = kSPI_Data4Bits;
      break;
    case 5:
      sdk_config.dataWidth = kSPI_Data5Bits;
      break;
    case 6:
      sdk_config.dataWidth = kSPI_Data6Bits;
      break;
    case 7:
      sdk_config.dataWidth = kSPI_Data7Bits;
      break;
    case 8:
      sdk_config.dataWidth = kSPI_Data8Bits;
      break;
    case 9:
      sdk_config.dataWidth = kSPI_Data9Bits;
      break;
    case 10:
      sdk_config.dataWidth = kSPI_Data10Bits;
      break;
    case 11:
      sdk_config.dataWidth = kSPI_Data11Bits;
      break;
    case 12:
      sdk_config.dataWidth = kSPI_Data12Bits;
      break;
    case 13:
      sdk_config.dataWidth = kSPI_Data13Bits;
      break;
    case 14:
      sdk_config.dataWidth = kSPI_Data14Bits;
      break;
    case 15:
      sdk_config.dataWidth = kSPI_Data15Bits;
      break;
    case 16:
      sdk_config.dataWidth = kSPI_Data16Bits;
      break;
    default:
      return Status::InvalidArgument();
  }

  return OkStatus();
}


//
// Helpful things missing from the SDK
//
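// Maps each SPI instance number (per SPI_GetInstance()) to its NVIC IRQ.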
const IRQn_Type spi_irq_map[] = SPI_IRQS;

// Enable interrupt on CS asserted / de-asserted.
void SPI_EnableSSInterrupt(SPI_Type* base) {
  base->STAT = SPI_STAT_SSA_MASK | SPI_STAT_SSD_MASK;  // Clear first
  base->INTENSET = SPI_INTENSET_SSAEN_MASK | SPI_INTENSET_SSDEN_MASK;
}

// Disable interrupt on CS asserted / de-asserted.
void SPI_DisableSSInterrupt(SPI_Type* base) {
  base->INTENCLR = SPI_INTENSET_SSAEN_MASK | SPI_INTENSET_SSDEN_MASK;
}

// Empty the TX and RX FIFOs.
void SPI_EmptyFifos(SPI_Type* base) {
  base->FIFOCFG |= SPI_FIFOCFG_EMPTYTX_MASK | SPI_FIFOCFG_EMPTYRX_MASK;
}

bool SPI_RxFifoIsEmpty(SPI_Type* base) {
  // RXNOTEMPTY: Receive FIFO is Not Empty
  // 0 - The receive FIFO is empty.
  // 1 - The receive FIFO is not empty, so data can be read.
  return !(base->FIFOSTAT & SPI_FIFOSTAT_RXNOTEMPTY_MASK);
}

// Non-FIFO interrupt sources
enum _spi_interrupt_sources {
  kSPI_SlaveSelAssertIrq = SPI_INTENSET_SSAEN_MASK,
  kSPI_SlaveSelDeassertIrq = SPI_INTENSET_SSDEN_MASK,
  kSPI_MasterIdleIrq = SPI_INTENSET_MSTIDLEEN_MASK,
};

// Gets a bitmap of active (pending + enabled) interrupts.
// Test against _spi_interrupt_sources constants.
uint32_t SPI_GetActiveInterrupts(SPI_Type* base) {
  // Verify that the bits in INTSTAT and INTENSET are the same.
  static_assert(SPI_INTSTAT_SSA_MASK == SPI_INTENSET_SSAEN_MASK);
  static_assert(SPI_INTSTAT_SSD_MASK == SPI_INTENSET_SSDEN_MASK);
  static_assert(SPI_INTSTAT_MSTIDLE_MASK == SPI_INTENSET_MSTIDLEEN_MASK);
  return base->INTSTAT & base->INTENSET;
}

// Clears a bitmap of active interrupts.
// This acknowledges the interrupt; it does not disable it.
// @irqs is either kSPI_SlaveSelAssertIrq or kSPI_SlaveSelDeassertIrq.
void SPI_ClearActiveInterrupts(SPI_Type* base, uint32_t irqs) {
  // Verify that the bits in STAT match the enum.
  static_assert(SPI_STAT_SSA_MASK == kSPI_SlaveSelAssertIrq);
  static_assert(SPI_STAT_SSD_MASK == kSPI_SlaveSelDeassertIrq);
  PW_CHECK((irqs & ~(kSPI_SlaveSelAssertIrq | kSPI_SlaveSelDeassertIrq)) == 0);
  base->STAT = irqs;  // write to clear
}

}  // namespace

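// Configures the SPI peripheral in slave mode for DMA transfers, and hooks up
// either the CS (SS) interrupt or the DMA completion callback, per config.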
Status McuxpressoResponder::Initialize() {
  status_t sdk_status;
  spi_slave_config_t sdk_config;
  spi_dma_callback_t callback;

  SPI_SlaveGetDefaultConfig(&sdk_config);
  PW_TRY(SetSdkConfig(config_, sdk_config));

  // Hard-coded for now, until added to Config.
  sdk_config.sselPol = kSPI_SpolActiveAllLow;

  sdk_status = SPI_SlaveInit(base_, &sdk_config);
  if (sdk_status != kStatus_Success) {
    PW_LOG_ERROR("SPI_SlaveInit failed: %" PRId32, sdk_status);
    return ToPwStatus(sdk_status);
  }

  if (config_.handle_cs) {
    // Set up the FLEXCOMM IRQ to get CS assertion/deassertion.
    // See SPI_MasterTransferCreateHandle().
    // Note that the 'handle' argument can actually be anything.
    FLEXCOMM_SetIRQHandler(base_, FlexcommSpiIrqHandler, this);

    // Enable SPI interrupt in NVIC.
    uint32_t instance = SPI_GetInstance(base_);
    EnableIRQ(spi_irq_map[instance]);

    // We only use the CS deassertion interrupt to complete transfers.
    // Don't provide any callback to the SPI driver (to be invoked by DMA IRQ).
    callback = nullptr;

    // Disable the DMA channel interrupts.
    // If we leave them enabled, then the SPI driver could complete a full
    // transfer, move the state to kSPI_Idle, and prevent
    // SPI_SlaveTransferGetCountDMA() from working.
    rx_dma_.DisableInterrupts();
    tx_dma_.DisableInterrupts();
  } else {
    // Without CS deassertion, we use the SPI driver callback (invoked by DMA
    // IRQ) to complete transfers.
    callback = McuxpressoResponder::SdkCallback;

    // Enable the DMA channel interrupts.
    // These are enabled by default by DMA_CreateHandle(), but re-enable them
    // anyway in case they were disabled for some reason.
    rx_dma_.EnableInterrupts();
    tx_dma_.EnableInterrupts();
  }

  sdk_status = SPI_SlaveTransferCreateHandleDMA(
      base_, &handle_, callback, this, tx_dma_.handle(), rx_dma_.handle());
  if (sdk_status != kStatus_Success) {
    PW_LOG_ERROR("SPI_SlaveTransferCreateHandleDMA failed: %" PRId32,
                 sdk_status);
    return ToPwStatus(sdk_status);
  }

  return OkStatus();
}

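// Finishes the current transaction: quiesces the hardware, then invokes the
// completion callback with the received data and final status.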
void McuxpressoResponder::TransferComplete(Status status,
                                           size_t bytes_transferred) {
  if (config_.handle_cs) {
    SPI_DisableSSInterrupt(base_);
  }

  // Abort the DMA transfer (if active).
  SPI_SlaveTransferAbortDMA(base_, &handle_);

  // Check for TX underflow / RX overflow.
  // TODO(jrreinhart): Unfortunately we can't do this. We want to check for
  // FIFO under/overflow *while* the transfer is running, but if the initiator
  // sent more bytes than the DMA was set up to receive, both of these errors
  // will happen (after the DMA is complete). We would need to find a way to
  // capture this status immediately when the DMA completes, or otherwise
  // monitor it during the transfer.
#if 0
  if (status.ok()) {
    if (SPI_RxError(base_)) {
      PW_LOG_ERROR("RX FIFO overflow detected!");
      status = Status::DataLoss();
    }
    if (SPI_TxError(base_)) {
      PW_LOG_ERROR("TX FIFO underflow detected!");
      status = Status::DataLoss();
    }
  }
#endif

  // TODO(jrreinhart) Remove these safety checks.
  if (rx_dma_.IsBusy()) {
    PW_LOG_WARN("After completion, rx_dma still busy!");
  }
  if (rx_dma_.IsActive()) {
    PW_LOG_WARN("After completion, rx_dma still active!");
  }

  // Empty the FIFOs.
  // If the initiator sent more bytes than the DMA was set up to receive, the
  // RX FIFO will hold the residue. This isn't strictly necessary, since the
  // FIFOs will be cleared on the next call to SPI_SlaveTransferDMA(), but we
  // do it anyway for cleanliness.
  SPI_EmptyFifos(base_);

  // Clear the FIFO DMA request signals.
  //
  // From IMXRT500RM 53.4.2.1.2 DMA operation:
  // "A DMA request is provided for each SPI direction, and can be used instead
  // of interrupts for transferring data... The DMA controller provides an
  // acknowledgement signal that clears the related request when it (the DMA
  // controller) completes handling that request."
  //
  // If the initiator sent more bytes than the DMA was set up to receive, this
  // request signal will remain latched on, even after the FIFO is emptied.
  // This would cause a subsequent transfer to receive one stale residual byte
  // from this prior transfer.
  //
  // We force it off here by disabling the DMA request signal.
  // It will be re-enabled on the next transfer.
  SPI_EnableRxDMA(base_, false);
  SPI_EnableTxDMA(base_, false);

  // Invoke the callback.
  auto received = current_transaction_.rx_data.subspan(0, bytes_transferred);
  current_transaction_ = {};
  completion_callback_(received, status);
}

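// DMA completion callback registered with the SDK when handle_cs=false
// (see Initialize()).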
void McuxpressoResponder::SdkCallback(SPI_Type* base,
                                      spi_dma_handle_t* handle,
                                      status_t sdk_status,
                                      void* userData) {
  // WARNING: This is called in IRQ context.
  auto* responder = static_cast<McuxpressoResponder*>(userData);
  PW_CHECK_PTR_EQ(base, responder->base_);
  PW_CHECK_PTR_EQ(handle, &responder->handle_);

  return responder->DmaComplete(sdk_status);
}

void McuxpressoResponder::DmaComplete(status_t sdk_status) {
  // WARNING: This is called in IRQ context.
  PW_CHECK(!config_.handle_cs,
           "DmaComplete should never be called when handle_cs=true!");

  // Move to idle state.
  if (State prev; !TryChangeState(State::kBusy, State::kIdle, &prev)) {
    // Spurious callback? Or race condition in DoWriteReadAsync()?
    PW_LOG_WARN("DmaComplete not in busy state, but %u",
                static_cast<unsigned int>(prev));
    return;
  }

  // Transfer complete.
  auto status = ToPwStatus(sdk_status);
  size_t bytes_transferred =
      status.ok() ? current_transaction_.rx_data.size() : 0;
  TransferComplete(status, bytes_transferred);
}

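// FLEXCOMM-level IRQ handler registered by Initialize() when handle_cs=true;
// dispatches CS assertion/deassertion events to the responder.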
void McuxpressoResponder::FlexcommSpiIrqHandler(void* base, void* arg) {
  // WARNING: This is called in IRQ context.

  SPI_Type* spi = static_cast<SPI_Type*>(base);
  auto* responder = static_cast<McuxpressoResponder*>(arg);
  PW_CHECK_PTR_EQ(spi, responder->base_);

  // NOTE: It's possible that CS could deassert and INTSTAT.SSD could latch
  // shortly after the IRQ handler is entered (due to INTSTAT.SSA), re-setting
  // the IRQ as pending in the NVIC. In this case, we could handle both SSA and
  // SSD in the same interrupt. When that happens, the IRQ remains pended in
  // the NVIC, and the handler will fire again. We simply ignore the second
  // interrupt.
  //
  // It would be wrong to try to handle only one of SSA or SSD per invocation,
  // because if the interrupt was handled late enough, it might only fire once.
  const auto active_irqs = SPI_GetActiveInterrupts(spi);

  // CS asserted?
  if (active_irqs & kSPI_SlaveSelAssertIrq) {
    SPI_ClearActiveInterrupts(spi, kSPI_SlaveSelAssertIrq);
    responder->CsAsserted();
  }

  // CS de-asserted?
  if (active_irqs & kSPI_SlaveSelDeassertIrq) {
    SPI_ClearActiveInterrupts(spi, kSPI_SlaveSelDeassertIrq);
    responder->CsDeasserted();
  }
}

void McuxpressoResponder::CsAsserted() {
  // WARNING: This is called in IRQ context.
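  // Currently nothing to do on assertion; the transfer is expected to have
  // been set up already via DoWriteReadAsync().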
}

Status McuxpressoResponder::WaitForQuiescenceAfterCsDeassertion() {
  // When CS is deasserted, the master is indicating that it has finished
  // clocking data into our FIFO. That could be more, fewer, or exactly as
  // many bytes as the user requested (in DoWriteReadAsync).
  //
  // Definitions:
  //   S: The DMA transfer size (as requested by the user).
  //   M: The number of bytes sent by the master.
  //
  // Case | Condition | DMA will complete? | FIFO will empty?
  // -----|-----------|--------------------|-------------------
  //  1   |  M < S    |  No                |  Yes
  //  2   |  M = S    |  Yes               |  Yes
  //  3   |  M > S    |  Yes               |  No
  //
  // At this point, the RX FIFO might still have data that the DMA has not yet
  // read.
  //
  // We wait for either the DMA channel to become inactive (case 2 or 3) or for
  // the RX FIFO to become empty (case 1 or 2). When the FIFO empties, we also
  // need to wait for the DMA channel to be non-busy, indicating that it has
  // finished moving the data to SRAM.
  //
  // It is expected that by the time this function is called, the hardware will
  // have already quiesced, and we won't actually wait at all. A warning log
  // will indicate if that assumption does not hold true.
  constexpr unsigned int kMaxWaitCount = 10000;  // Arbitrary

  unsigned int wait_count;
  for (wait_count = 0; wait_count < kMaxWaitCount; ++wait_count) {
    if (!rx_dma_.IsActive()) {
      // The DMA has consumed as many bytes from the FIFO as it ever will.
      break;
    }

    if (SPI_RxFifoIsEmpty(base_) && !rx_dma_.IsBusy()) {
      // The FIFO is empty, and the DMA channel has moved all data to SRAM.
      break;
    }

    // The DMA is still active and the FIFO is not empty; keep waiting.
  }

  if (wait_count == kMaxWaitCount) {
    PW_LOG_ERROR(
        "After CS de-assertion, timed out waiting for DMA done or FIFO empty.");
    return Status::DeadlineExceeded();
  }

  if (wait_count != 0) {
    PW_LOG_WARN(
        "After CS de-assertion, waited %u times for DMA done or FIFO empty.",
        wait_count);
  }
  return OkStatus();
}

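// Called from the FLEXCOMM IRQ when CS de-asserts; completes the transfer
// regardless of whether the DMA consumed all of the requested bytes.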
void McuxpressoResponder::CsDeasserted() {
  // WARNING: This is called in IRQ context.
  PW_CHECK(config_.handle_cs,
           "CsDeasserted should only be called when handle_cs=true!");

  // Move to idle state.
  if (State prev; !TryChangeState(State::kBusy, State::kIdle, &prev)) {
    PW_LOG_WARN("CsDeasserted not in busy state, but %u",
                static_cast<unsigned int>(prev));
    return;
  }

  Status wait_status = WaitForQuiescenceAfterCsDeassertion();

  // Get the number of bytes actually transferred.
  //
  // NOTE: SPI_SlaveTransferGetCountDMA() fails if _handle.state != kSPI_Busy.
  // Thus, it must be called before SPI_SlaveTransferAbortDMA(), which changes
  // the state to kSPI_Idle. Also, the DMA channel interrupts are disabled when
  // CS is handled (handle_cs=true), because SPI_RxDMACallback() and
  // SPI_TxDMACallback() also change the state to kSPI_Idle.
  size_t bytes_transferred = 0;
  status_t sdk_status =
      SPI_SlaveTransferGetCountDMA(base_, &handle_, &bytes_transferred);

  // Transfer complete.
  Status xfer_status = OkStatus();
  if (!wait_status.ok()) {
    bytes_transferred = 0;
    xfer_status = wait_status;
  } else if (sdk_status != kStatus_Success) {
    PW_LOG_ERROR("SPI_SlaveTransferGetCountDMA() returned %" PRId32,
                 sdk_status);
    bytes_transferred = 0;
    xfer_status = ToPwStatus(sdk_status);
  }
  TransferComplete(xfer_status, bytes_transferred);
}

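// Queues a single SPI transfer. tx_data and rx_data may each be empty, but
// when both are provided they must be the same size, since a single SDK
// dataSize covers both directions.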
Status McuxpressoResponder::DoWriteReadAsync(ConstByteSpan tx_data,
                                             ByteSpan rx_data) {
  if (!TryChangeState(State::kIdle, State::kBusy)) {
    PW_LOG_ERROR("Transaction already started");
    return Status::FailedPrecondition();
  }
  PW_CHECK(!current_transaction_);

  // TODO(jrreinhart): There is a race here. If DoCancel() is called, it will
  // move to kIdle, and invoke the callback with CANCELLED. But then we will
  // still go on to perform the transfer anyway. When the transfer completes,
  // SdkCallback will see kIdle and skip the callback. We avoid this problem
  // by saying that DoWriteReadAsync() and DoCancel() should not be called from
  // different threads, thus we only have to worry about DoCancel() racing the
  // hardware / IRQ.

  spi_transfer_t transfer = {};

  if (!tx_data.empty() && !rx_data.empty()) {
    // spi_transfer_t has only a single dataSize member, so tx_data and
    // rx_data must be the same size. Separate rx/tx data sizes could
    // theoretically be handled, but the SDK doesn't support it.
    //
    // TODO(jrreinhart) Support separate rx/tx data sizes.
    // For non-DMA, it's a pretty simple patch.
    // It should be doable for DMA also, but I haven't looked into it.
    if (tx_data.size() != rx_data.size()) {
      return Status::InvalidArgument();
    }

    transfer.txData = SpanDataDiscardConst(tx_data);
    transfer.rxData = SpanData(rx_data);
    transfer.dataSize = rx_data.size();
  } else if (!tx_data.empty()) {
    transfer.txData = SpanDataDiscardConst(tx_data);
    transfer.dataSize = tx_data.size();
  } else if (!rx_data.empty()) {
    transfer.rxData = SpanData(rx_data);
    transfer.dataSize = rx_data.size();
  } else {
    return Status::InvalidArgument();
  }

  current_transaction_ = {
      .rx_data = rx_data,
  };

  if (config_.handle_cs) {
    // Complete the transfer when CS is deasserted.
    SPI_EnableSSInterrupt(base_);
  }

  status_t sdk_status = SPI_SlaveTransferDMA(base_, &handle_, &transfer);
  if (sdk_status != kStatus_Success) {
    PW_LOG_ERROR("SPI_SlaveTransferDMA failed: %" PRId32, sdk_status);
    return ToPwStatus(sdk_status);
  }

  return OkStatus();
}

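// Cancels the in-flight transfer, if any, reporting CANCELLED to the
// completion callback.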
void McuxpressoResponder::DoCancel() {
  if (!TryChangeState(State::kBusy, State::kIdle)) {
    return;
  }
  TransferComplete(Status::Cancelled(), 0);
}

}  // namespace pw::spi