1 // Copyright 2019 Espressif Systems (Shanghai) PTE LTD
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #pragma once
15
16 #ifdef __cplusplus
17 extern "C" {
18 #endif
19
#include <stdbool.h>
#include <stdint.h>
#include "soc/rmt_struct.h"
#include "soc/soc_caps.h"
23
24 #define RMT_LL_HW_BASE (&RMT)
25 #define RMT_LL_MEM_BASE (&RMTMEM)
26
27 // Note: TX and RX channel number are all index from zero in the LL driver
28 // i.e. tx_channel belongs to [0,7], and rx_channel belongs to [0,7]
29
// Enable/disable the peripheral register clock gate.
// The `clk_en` bit exists only in channel 0's conf0 register but acts globally.
static inline void rmt_ll_enable_drive_clock(rmt_dev_t *dev, bool enable)
{
    dev->conf_ch[0].conf0.clk_en = enable;
}
34
// Power down (enable=true) or power up (enable=false) the RMT channel memory.
static inline void rmt_ll_power_down_mem(rmt_dev_t *dev, bool enable)
{
    dev->conf_ch[0].conf0.mem_pd = enable; // Only conf0 register of channel0 has `mem_pd`
}
39
// Return true if the RMT channel memory is currently powered down.
static inline bool rmt_ll_is_mem_power_down(rmt_dev_t *dev)
{
    return dev->conf_ch[0].conf0.mem_pd; // Only conf0 register of channel0 has `mem_pd`
}
44
// Select how software reaches channel RAM: enable=true sets `fifo_mask`,
// which (per this driver's usage) allows direct memory access instead of
// the FIFO register window. Applies to all channels.
static inline void rmt_ll_enable_mem_access(rmt_dev_t *dev, bool enable)
{
    dev->apb_conf.fifo_mask = enable;
}
49
// Select the counter clock source for a channel via `ref_always_on`
// (src is written directly into that 1-bit field).
// NOTE(review): div_num/div_a/div_b are intentionally unused — this chip has
// no fractional divider for the source clock; the parameters exist only to
// keep the LL API signature uniform across chip generations.
static inline void rmt_ll_set_counter_clock_src(rmt_dev_t *dev, uint32_t channel, uint8_t src, uint8_t div_num, uint8_t div_a, uint8_t div_b)
{
    dev->conf_ch[channel].conf1.ref_always_on = src;
}
54
// Return the currently selected counter clock source (the `ref_always_on` field).
static inline uint32_t rmt_ll_get_counter_clock_src(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.ref_always_on;
}
59
// Reset the TX counter clock divider by pulsing `ref_cnt_rst` high then low.
// The write order (1 then 0) is required to generate the reset edge.
static inline void rmt_ll_tx_reset_counter_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->conf_ch[channel].conf1.ref_cnt_rst = 1;
    dev->conf_ch[channel].conf1.ref_cnt_rst = 0;
}
65
// Reset the RX counter clock divider by pulsing `ref_cnt_rst` high then low.
// TX and RX share the same reset bit on this chip.
static inline void rmt_ll_rx_reset_counter_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->conf_ch[channel].conf1.ref_cnt_rst = 1;
    dev->conf_ch[channel].conf1.ref_cnt_rst = 0;
}
71
// Reset the TX memory read pointer (pulse `mem_rd_rst`) so the next
// transmission starts from the beginning of channel RAM.
static inline void rmt_ll_tx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->conf_ch[channel].conf1.mem_rd_rst = 1;
    dev->conf_ch[channel].conf1.mem_rd_rst = 0;
}
77
// Reset the RX memory write pointer (pulse `mem_wr_rst`) so received items
// are stored from the beginning of channel RAM again.
static inline void rmt_ll_rx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->conf_ch[channel].conf1.mem_wr_rst = 1;
    dev->conf_ch[channel].conf1.mem_wr_rst = 0;
}
83
// Start transmission on the given channel.
static inline void rmt_ll_tx_start(rmt_dev_t *dev, uint32_t channel)
{
    dev->conf_ch[channel].conf1.tx_start = 1;
}
88
// Stop an in-flight transmission. This chip has no dedicated tx_stop bit, so:
// 1) write a zero item (end marker) at the start of channel RAM,
// 2) clear tx_start,
// 3) reset the read pointer so the engine fetches the end marker and halts.
// The statement order is significant — do not reorder.
static inline void rmt_ll_tx_stop(rmt_dev_t *dev, uint32_t channel)
{
    RMTMEM.chan[channel].data32[0].val = 0;
    dev->conf_ch[channel].conf1.tx_start = 0;
    dev->conf_ch[channel].conf1.mem_rd_rst = 1;
    dev->conf_ch[channel].conf1.mem_rd_rst = 0;
}
96
// Enable/disable the receiver on the given channel.
static inline void rmt_ll_rx_enable(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->conf_ch[channel].conf1.rx_en = enable;
}
101
// Set how many memory blocks the TX channel owns (shared `mem_size` field;
// TX and RX use the same register on this chip).
static inline void rmt_ll_tx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->conf_ch[channel].conf0.mem_size = block_num;
}
106
// Set how many memory blocks the RX channel owns (same `mem_size` field as TX).
static inline void rmt_ll_rx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->conf_ch[channel].conf0.mem_size = block_num;
}
111
// Return the number of memory blocks allocated to the TX channel.
static inline uint32_t rmt_ll_tx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf0.mem_size;
}
116
// Return the number of memory blocks allocated to the RX channel.
static inline uint32_t rmt_ll_rx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf0.mem_size;
}
121
// Set the TX counter clock divider. Hardware quirk: writing 0 selects
// a divider of 256 (see the matching getter).
static inline void rmt_ll_tx_set_counter_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    dev->conf_ch[channel].conf0.div_cnt = div;
}
126
// Set the RX counter clock divider (shared `div_cnt` field with TX;
// writing 0 selects a divider of 256).
static inline void rmt_ll_rx_set_counter_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    dev->conf_ch[channel].conf0.div_cnt = div;
}
131
// Return the effective TX counter clock divider.
// A raw register value of 0 means a divider of 256.
static inline uint32_t rmt_ll_tx_get_counter_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    uint32_t div = dev->conf_ch[channel].conf0.div_cnt;
    return div == 0 ? 256 : div;
}
137
// Return the effective RX counter clock divider (0 in the register means 256).
static inline uint32_t rmt_ll_rx_get_counter_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    uint32_t div = dev->conf_ch[channel].conf0.div_cnt;
    return div == 0 ? 256 : div;
}
143
// Enable/disable TX memory wrap ("ping-pong") mode.
// NOTE(review): `channel` is unused — `mem_tx_wrap_en` is a single global bit
// affecting every channel on this chip.
static inline void rmt_ll_tx_enable_pingpong(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->apb_conf.mem_tx_wrap_en = enable;
}
148
// Set the RX idle threshold (in counter clock ticks): an input level lasting
// longer than this ends the receive frame.
static inline void rmt_ll_rx_set_idle_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    dev->conf_ch[channel].conf0.idle_thres = thres;
}
153
// Return the RX idle threshold in counter clock ticks.
static inline uint32_t rmt_ll_rx_get_idle_thres(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf0.idle_thres;
}
158
// Set the channel RAM owner (hardware receiver vs. software transmitter);
// `owner` is the raw value written to the 1-bit `mem_owner` field.
static inline void rmt_ll_rx_set_mem_owner(rmt_dev_t *dev, uint32_t channel, uint8_t owner)
{
    dev->conf_ch[channel].conf1.mem_owner = owner;
}
163
// Return the current channel RAM owner bit.
static inline uint32_t rmt_ll_rx_get_mem_owner(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.mem_owner;
}
168
// Enable/disable continuous (loop) transmission mode.
static inline void rmt_ll_tx_enable_loop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->conf_ch[channel].conf1.tx_conti_mode = enable;
}
173
// Return true if continuous (loop) transmission mode is enabled.
static inline bool rmt_ll_is_tx_loop_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.tx_conti_mode;
}
178
// Enable/disable the RX input glitch filter.
static inline void rmt_ll_rx_enable_filter(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->conf_ch[channel].conf1.rx_filter_en = enable;
}
183
// Set the RX filter threshold: pulses shorter than `thres` ticks are ignored.
static inline void rmt_ll_rx_set_filter_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    dev->conf_ch[channel].conf1.rx_filter_thres = thres;
}
188
// Enable/disable driving the idle output level when TX is not active.
static inline void rmt_ll_tx_enable_idle(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->conf_ch[channel].conf1.idle_out_en = enable;
}
193
// Return true if the idle output driver is enabled for the channel.
static inline bool rmt_ll_is_tx_idle_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.idle_out_en;
}
198
// Set the output level (0/1) driven while the channel is idle.
static inline void rmt_ll_tx_set_idle_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->conf_ch[channel].conf1.idle_out_lv = level;
}
203
// Return the configured idle output level.
static inline uint32_t rmt_ll_tx_get_idle_level(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.idle_out_lv;
}
208
// Return the raw status register for the channel (shared by TX and RX
// on this chip — same register as rmt_ll_tx_get_channel_status).
static inline uint32_t rmt_ll_rx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->status_ch[channel];
}
213
// Return the raw status register for the channel (shared by TX and RX).
static inline uint32_t rmt_ll_tx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->status_ch[channel];
}
218
// Set the TX event threshold: after `limit` items are sent, the hardware
// raises the per-channel threshold interrupt (bits 24..31 of int_st).
static inline void rmt_ll_tx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    dev->tx_lim_ch[channel].limit = limit;
}
223
// Enable/disable the "TX done" interrupt. Event bits are packed 3-per-channel
// in int_ena: tx_end = bit (channel*3). Clear-then-set makes enable=false work.
static inline void rmt_ll_enable_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->int_ena.val &= ~(1 << (channel * 3));
    dev->int_ena.val |= (enable << (channel * 3));
}
229
// Enable/disable the "RX done" interrupt: rx_end = bit (channel*3 + 1).
static inline void rmt_ll_enable_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->int_ena.val &= ~(1 << (channel * 3 + 1));
    dev->int_ena.val |= (enable << (channel * 3 + 1));
}
235
// Enable/disable the error interrupt: err = bit (channel*3 + 2).
// TX and RX share this single error bit per channel on this chip.
static inline void rmt_ll_enable_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->int_ena.val &= ~(1 << (channel * 3 + 2));
    dev->int_ena.val |= (enable << (channel * 3 + 2));
}
241
// Enable/disable the error interrupt for RX. Intentionally identical to
// rmt_ll_enable_tx_err_interrupt: the hardware exposes one shared error bit
// per channel, bit (channel*3 + 2).
static inline void rmt_ll_enable_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->int_ena.val &= ~(1 << (channel * 3 + 2));
    dev->int_ena.val |= (enable << (channel * 3 + 2));
}
247
// Enable/disable the TX threshold interrupt. Threshold event bits occupy
// int_ena[31:24], one bit per channel at position (channel + 24).
// Fix: use unsigned shift operands — with channel == 7 the old code computed
// `1 << 31` / `enable << 31` on a signed int, which is undefined behavior
// (left-shifting into the sign bit, C11 6.5.7). `channel` is uint32_t, so
// 1UL/(uint32_t)enable keeps the whole expression unsigned and well-defined.
static inline void rmt_ll_enable_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->int_ena.val &= ~(1UL << (channel + 24));
    dev->int_ena.val |= ((uint32_t)enable << (channel + 24));
}
253
// Clear a pending "TX done" interrupt (write-1-to-clear, bit channel*3).
static inline void rmt_ll_clear_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel * 3));
}
258
// Clear a pending "RX done" interrupt (write-1-to-clear, bit channel*3 + 1).
static inline void rmt_ll_clear_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel * 3 + 1));
}
263
// Clear a pending error interrupt (write-1-to-clear, bit channel*3 + 2;
// shared between TX and RX on this chip).
static inline void rmt_ll_clear_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel * 3 + 2));
}
268
// Clear a pending error interrupt for RX — intentionally identical to
// rmt_ll_clear_tx_err_interrupt (single shared error bit per channel).
static inline void rmt_ll_clear_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel * 3 + 2));
}
273
// Clear a pending TX threshold interrupt (write-1-to-clear, bit channel + 24).
// Fix: `1UL` instead of `1` — with channel == 7 the old `1 << 31` left-shifted
// into the sign bit of a signed int, which is undefined behavior (C11 6.5.7).
static inline void rmt_ll_clear_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1UL << (channel + 24));
}
278
// Collapse the per-channel "TX done" bits of int_st into a channel bitmap.
// int_st packs 3 events per channel: tx_end at bit 3n, rx_end at 3n+1,
// err at 3n+2. Each mask below picks channel n's tx_end bit (3n) and shifts
// it down to bit n, so the result has bit n set iff channel n finished TX.
static inline uint32_t rmt_ll_get_tx_end_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x01) >> 0) | ((status & 0x08) >> 2) | ((status & 0x40) >> 4) | ((status & 0x200) >> 6) |
           ((status & 0x1000) >> 8) | ((status & 0x8000) >> 10) | ((status & 0x40000) >> 12) | ((status & 0x200000) >> 14);
}
285
// Collapse the per-channel "RX done" bits (bit 3n+1 of int_st) into a
// channel bitmap: result bit n is set iff channel n finished receiving.
static inline uint32_t rmt_ll_get_rx_end_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x02) >> 1) | ((status & 0x10) >> 3) | ((status & 0x80) >> 5) | ((status & 0x400) >> 7) |
           ((status & 0x2000) >> 9) | ((status & 0x10000) >> 11) | ((status & 0x80000) >> 13) | ((status & 0x400000) >> 15);
}
292
// Collapse the per-channel error bits (bit 3n+2 of int_st) into a channel
// bitmap. TX and RX share one error bit per channel on this chip.
static inline uint32_t rmt_ll_get_tx_err_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x04) >> 2) | ((status & 0x20) >> 4) | ((status & 0x100) >> 6) | ((status & 0x800) >> 8) |
           ((status & 0x4000) >> 10) | ((status & 0x20000) >> 12) | ((status & 0x100000) >> 14) | ((status & 0x800000) >> 16);
}
299
// RX error status — intentionally identical to the TX variant: the hardware
// has a single shared error bit per channel (bit 3n+2 of int_st).
static inline uint32_t rmt_ll_get_rx_err_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x04) >> 2) | ((status & 0x20) >> 4) | ((status & 0x100) >> 6) | ((status & 0x800) >> 8) |
           ((status & 0x4000) >> 10) | ((status & 0x20000) >> 12) | ((status & 0x100000) >> 14) | ((status & 0x800000) >> 16);
}
306
// Return the TX threshold interrupt status as a channel bitmap: the threshold
// bits live contiguously in int_st[31:24], one bit per channel.
static inline uint32_t rmt_ll_get_tx_thres_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return (status & 0xFF000000) >> 24;
}
312
// Program the carrier waveform: duration of the high and low phases, in ticks.
static inline void rmt_ll_tx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    dev->carrier_duty_ch[channel].high = high_ticks;
    dev->carrier_duty_ch[channel].low = low_ticks;
}
318
// Read back the carrier high/low phase durations into the out-parameters
// (both pointers must be non-NULL).
static inline void rmt_ll_tx_get_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t *high_ticks, uint32_t *low_ticks)
{
    *high_ticks = dev->carrier_duty_ch[channel].high;
    *low_ticks = dev->carrier_duty_ch[channel].low;
}
324
// Enable/disable carrier modulation of the TX output.
static inline void rmt_ll_tx_enable_carrier_modulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->conf_ch[channel].conf0.carrier_en = enable;
}
329
// Select which output level (0/1) the carrier is modulated onto.
static inline void rmt_ll_tx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->conf_ch[channel].conf0.carrier_out_lv = level;
}
334
// Writes `length` items to the specified TX channel memory at the given offset.
// The caller must ensure that (length + off) <= (memory blocks * SOC_RMT_CHANNEL_MEM_WORDS).
// Copy `length` RMT items from `data` into channel RAM starting at word `off`.
// Items are copied word-by-word through the `.val` view of each entry; a plain
// memcpy is deliberately avoided for this memory-mapped region.
static inline void rmt_ll_write_memory(rmt_mem_t *mem, uint32_t channel, const rmt_item32_t *data, uint32_t length, uint32_t off)
{
    const rmt_item32_t *src = data;
    for (uint32_t dst_idx = off; dst_idx < off + length; dst_idx++) {
        mem->chan[channel].data32[dst_idx].val = (src++)->val;
    }
}
343
// Intentionally empty: this chip applies configuration writes immediately and
// has no "config update" trigger bit. Kept so the LL API matches newer chips.
static inline void rmt_ll_config_update(rmt_dev_t *dev, uint32_t channel)
{
}
347
348 /************************************************************************************************
349 * Following Low Level APIs only used for backward compatible, will be deprecated in the IDF v5.0
350 ***********************************************************************************************/
351
// Legacy helper (deprecated in IDF v5.0): OR `mask` into the global interrupt
// enable register. Operates on the global RMT instance, not a dev pointer.
static inline void rmt_ll_set_intr_enable_mask(uint32_t mask)
{
    RMT.int_ena.val |= mask;
}
356
// Legacy helper (deprecated in IDF v5.0): clear the bits in `mask` from the
// global interrupt enable register.
static inline void rmt_ll_clr_intr_enable_mask(uint32_t mask)
{
    RMT.int_ena.val &= (~mask);
}
361
362 #ifdef __cplusplus
363 }
364 #endif
365