1 /*
2  * Copyright (C) Ericsson AB 2007-2008
3  * Copyright (C) ST-Ericsson SA 2008-2010
4  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6  * License terms: GNU General Public License (GPL) version 2
7  */
8 
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/export.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/err.h>
20 #include <linux/amba/bus.h>
21 #include <linux/regulator/consumer.h>
22 #include <linux/platform_data/dma-ste-dma40.h>
23 
24 #include "dmaengine.h"
25 #include "ste_dma40_ll.h"
26 
27 #define D40_NAME "dma40"
28 
29 #define D40_PHY_CHAN -1
30 
31 /* For masking out/in 2 bit channel positions */
32 #define D40_CHAN_POS(chan)  (2 * (chan / 2))
33 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
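/*
 * Illustrative example (not from the original source): channel 5 gives
 * D40_CHAN_POS(5) = 2 * (5 / 2) = 4 and D40_CHAN_POS_MASK(5) = 0x3 << 4 =
 * 0x30, i.e. bits [5:4] of the register. Even and odd channels are written
 * to separate registers elsewhere in the driver (D40_DREG_ACTIVE vs
 * D40_DREG_ACTIVO), so two bits per channel suffice.
 */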
34 
35 /* Maximum iterations taken before giving up suspending a channel */
36 #define D40_SUSPEND_MAX_IT 500
37 
38 /* Milliseconds */
39 #define DMA40_AUTOSUSPEND_DELAY	100
40 
41 /* Hardware requirement on LCLA alignment */
42 #define LCLA_ALIGNMENT 0x40000
43 
44 /* Max number of links per event group */
45 #define D40_LCLA_LINK_PER_EVENT_GRP 128
46 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
47 
48 /* Attempts before giving up trying to get pages that are aligned */
49 #define MAX_LCLA_ALLOC_ATTEMPTS 256
50 
51 /* Bit markings for allocation map */
52 #define D40_ALLOC_FREE		(1 << 31)
53 #define D40_ALLOC_PHY		(1 << 30)
54 #define D40_ALLOC_LOG_FREE	0
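/*
 * Illustrative note (not from the original source): d40_alloc_mask_set()
 * below uses these markers as follows. A completely unused half channel has
 * allocated_src/allocated_dst == D40_ALLOC_FREE, a physically allocated one
 * has D40_ALLOC_PHY, and a logically allocated one starts from
 * D40_ALLOC_LOG_FREE (0) and sets one bit per claimed event line, e.g.
 * event lines 3 and 5 in use on the src side gives
 * allocated_src == (1 << 3) | (1 << 5).
 */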
55 
56 #define MAX(a, b) (((a) < (b)) ? (b) : (a))
57 
58 /**
59  * enum d40_command - The different commands and/or statuses.
60  *
61  * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
62  * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
63  * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
64  * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
65  */
66 enum d40_command {
67 	D40_DMA_STOP		= 0,
68 	D40_DMA_RUN		= 1,
69 	D40_DMA_SUSPEND_REQ	= 2,
70 	D40_DMA_SUSPENDED	= 3
71 };
72 
73 /*
74  * enum d40_events - The different Event Enables for the event lines.
75  *
76  * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
77  * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
78  * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
79  * @D40_ROUND_EVENTLINE: Status check for event line.
80  */
81 
82 enum d40_events {
83 	D40_DEACTIVATE_EVENTLINE	= 0,
84 	D40_ACTIVATE_EVENTLINE		= 1,
85 	D40_SUSPEND_REQ_EVENTLINE	= 2,
86 	D40_ROUND_EVENTLINE		= 3
87 };
88 
89 /*
90  * These are the registers that have to be saved and later restored
91  * when the DMA hw is powered off.
92  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
93  */
94 static u32 d40_backup_regs[] = {
95 	D40_DREG_LCPA,
96 	D40_DREG_LCLA,
97 	D40_DREG_PRMSE,
98 	D40_DREG_PRMSO,
99 	D40_DREG_PRMOE,
100 	D40_DREG_PRMOO,
101 };
102 
103 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
104 
105 /*
106  * Since 9540 and 8540 have the same HW revision,
107  * use v4a for 9540 or earlier,
108  * use v4b for 8540 or later
109  * HW revision:
110  * DB8500ed has revision 0
111  * DB8500v1 has revision 2
112  * DB8500v2 has revision 3
113  * AP9540v1 has revision 4
114  * DB8540v1 has revision 4
115  * TODO: Check if all these registers have to be saved/restored on dma40 v4a
116  */
117 static u32 d40_backup_regs_v4a[] = {
118 	D40_DREG_PSEG1,
119 	D40_DREG_PSEG2,
120 	D40_DREG_PSEG3,
121 	D40_DREG_PSEG4,
122 	D40_DREG_PCEG1,
123 	D40_DREG_PCEG2,
124 	D40_DREG_PCEG3,
125 	D40_DREG_PCEG4,
126 	D40_DREG_RSEG1,
127 	D40_DREG_RSEG2,
128 	D40_DREG_RSEG3,
129 	D40_DREG_RSEG4,
130 	D40_DREG_RCEG1,
131 	D40_DREG_RCEG2,
132 	D40_DREG_RCEG3,
133 	D40_DREG_RCEG4,
134 };
135 
136 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
137 
138 static u32 d40_backup_regs_v4b[] = {
139 	D40_DREG_CPSEG1,
140 	D40_DREG_CPSEG2,
141 	D40_DREG_CPSEG3,
142 	D40_DREG_CPSEG4,
143 	D40_DREG_CPSEG5,
144 	D40_DREG_CPCEG1,
145 	D40_DREG_CPCEG2,
146 	D40_DREG_CPCEG3,
147 	D40_DREG_CPCEG4,
148 	D40_DREG_CPCEG5,
149 	D40_DREG_CRSEG1,
150 	D40_DREG_CRSEG2,
151 	D40_DREG_CRSEG3,
152 	D40_DREG_CRSEG4,
153 	D40_DREG_CRSEG5,
154 	D40_DREG_CRCEG1,
155 	D40_DREG_CRCEG2,
156 	D40_DREG_CRCEG3,
157 	D40_DREG_CRCEG4,
158 	D40_DREG_CRCEG5,
159 };
160 
161 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
162 
163 static u32 d40_backup_regs_chan[] = {
164 	D40_CHAN_REG_SSCFG,
165 	D40_CHAN_REG_SSELT,
166 	D40_CHAN_REG_SSPTR,
167 	D40_CHAN_REG_SSLNK,
168 	D40_CHAN_REG_SDCFG,
169 	D40_CHAN_REG_SDELT,
170 	D40_CHAN_REG_SDPTR,
171 	D40_CHAN_REG_SDLNK,
172 };
173 
174 /**
175  * struct d40_interrupt_lookup - lookup table for interrupt handler
176  *
177  * @src: Interrupt mask register.
178  * @clr: Interrupt clear register.
179  * @is_error: true if this is an error interrupt.
180  * @offset: start delta in the lookup_log_chans in d40_base. If equal to
181  * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
182  */
183 struct d40_interrupt_lookup {
184 	u32 src;
185 	u32 clr;
186 	bool is_error;
187 	int offset;
188 };
189 
190 
191 static struct d40_interrupt_lookup il_v4a[] = {
192 	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
193 	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
194 	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
195 	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
196 	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
197 	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
198 	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
199 	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
200 	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
201 	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
202 };
203 
204 static struct d40_interrupt_lookup il_v4b[] = {
205 	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
206 	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
207 	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
208 	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
209 	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
210 	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
211 	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
212 	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
213 	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
214 	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
215 	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
216 	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
217 };
218 
219 /**
220  * struct d40_reg_val - simple lookup struct
221  *
222  * @reg: The register.
223  * @val: The value that belongs to the register in reg.
224  */
225 struct d40_reg_val {
226 	unsigned int reg;
227 	unsigned int val;
228 };
229 
230 static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
231 	/* Clock every part of the DMA block from start */
232 	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
233 
234 	/* Interrupts on all logical channels */
235 	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
236 	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
237 	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
238 	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
239 	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
240 	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
241 	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
242 	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
243 	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
244 	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
245 	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
246 	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
247 };
248 static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
249 	/* Clock every part of the DMA block from start */
250 	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
251 
252 	/* Interrupts on all logical channels */
253 	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
254 	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
255 	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
256 	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
257 	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
258 	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
259 	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
260 	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
261 	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
262 	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
263 	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
264 	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
265 	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
266 	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
267 	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
268 };
269 
270 /**
271  * struct d40_lli_pool - Structure for keeping LLIs in memory
272  *
273  * @base: Pointer to memory area when the pre_alloc_lli's are not large
274  * enough, i.e. bigger than the most common case of 1 dst and 1 src. NULL if
275  * pre_alloc_lli is used.
276  * @dma_addr: DMA address, if mapped
277  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
278  * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
279  * one buffer to one buffer.
280  */
281 struct d40_lli_pool {
282 	void	*base;
283 	int	 size;
284 	dma_addr_t	dma_addr;
285 	/* Space for dst and src, plus an extra for padding */
286 	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
287 };
288 
289 /**
290  * struct d40_desc - A descriptor is one DMA job.
291  *
292  * @lli_phy: LLI settings for physical channel. Both src and dst
293  * point into the lli_pool: to base if lli_len > 1 or to pre_alloc_lli if
294  * lli_len equals one.
295  * @lli_log: Same as above but for logical channels.
296  * @lli_pool: The pool with two entries pre-allocated.
297  * @lli_len: Number of llis of current descriptor.
298  * @lli_current: Number of transferred llis.
299  * @lcla_alloc: Number of LCLA entries allocated.
300  * @txd: DMA engine struct. Used, among other things, for communication
301  * during a transfer.
302  * @node: List entry.
303  * @is_in_client_list: true if the client owns this descriptor.
304  * @cyclic: true if this is a cyclic job
305  *
306  * This descriptor is used for both logical and physical transfers.
307  */
308 struct d40_desc {
309 	/* LLI physical */
310 	struct d40_phy_lli_bidir	 lli_phy;
311 	/* LLI logical */
312 	struct d40_log_lli_bidir	 lli_log;
313 
314 	struct d40_lli_pool		 lli_pool;
315 	int				 lli_len;
316 	int				 lli_current;
317 	int				 lcla_alloc;
318 
319 	struct dma_async_tx_descriptor	 txd;
320 	struct list_head		 node;
321 
322 	bool				 is_in_client_list;
323 	bool				 cyclic;
324 };
325 
326 /**
327  * struct d40_lcla_pool - LCLA pool settings and data.
328  *
329  * @base: The virtual address of LCLA. 18 bit aligned.
330  * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
331  * This pointer is only there for clean-up on error.
332  * @pages: The number of pages needed for all physical channels.
333  * Only used later for clean-up on error
334  * @lock: Lock to protect the content in this struct.
335  * @alloc_map: Big map of which LCLA entry is owned by which job.
336  */
337 struct d40_lcla_pool {
338 	void		*base;
339 	dma_addr_t	dma_addr;
340 	void		*base_unaligned;
341 	int		 pages;
342 	spinlock_t	 lock;
343 	struct d40_desc	**alloc_map;
344 };
345 
346 /**
347  * struct d40_phy_res - struct for handling eventlines mapped to physical
348  * channels.
349  *
350  * @lock: A lock protecting this entity.
351  * @reserved: True if used by secure world or otherwise.
352  * @num: The physical channel number of this entity.
353  * @allocated_src: Bit map showing which src event lines are mapped to
354  * this physical channel. Can also be free or physically allocated.
355  * @allocated_dst: Same as for src, but for dst.
356  * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
357  * the event line number.
358  * @use_soft_lli: True if the channel's linked lists are managed by SW.
359  */
360 struct d40_phy_res {
361 	spinlock_t lock;
362 	bool	   reserved;
363 	int	   num;
364 	u32	   allocated_src;
365 	u32	   allocated_dst;
366 	bool	   use_soft_lli;
367 };
368 
369 struct d40_base;
370 
371 /**
372  * struct d40_chan - Struct that describes a channel.
373  *
374  * @lock: A spinlock to protect this struct.
375  * @log_num: The logical number, if any, of this channel.
376  * @pending_tx: The number of pending transfers. Used between interrupt handler
377  * and tasklet.
378  * @busy: Set to true when transfer is ongoing on this channel.
379  * @phy_chan: Pointer to physical channel which this instance runs on. If this
380  * pointer is NULL, then the channel is not allocated.
381  * @chan: DMA engine handle.
382  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
383  * transfer and call client callback.
384  * @client: Client-owned descriptor list.
385  * @pending_queue: Submitted jobs, to be issued by issue_pending()
386  * @active: Active descriptor.
387  * @done: Completed jobs
388  * @queue: Queued jobs.
389  * @prepare_queue: Prepared jobs.
390  * @dma_cfg: The client configuration of this dma channel.
391  * @configured: whether the dma_cfg configuration is valid
392  * @base: Pointer to the device instance struct.
393  * @src_def_cfg: Default cfg register setting for src.
394  * @dst_def_cfg: Default cfg register setting for dst.
395  * @log_def: Default logical channel settings.
396  * @lcpa: Pointer to dst and src lcpa settings.
397  * @runtime_addr: runtime configured address.
398  * @runtime_direction: runtime configured direction.
399  *
400  * This struct can either "be" a logical or a physical channel.
401  */
402 struct d40_chan {
403 	spinlock_t			 lock;
404 	int				 log_num;
405 	int				 pending_tx;
406 	bool				 busy;
407 	struct d40_phy_res		*phy_chan;
408 	struct dma_chan			 chan;
409 	struct tasklet_struct		 tasklet;
410 	struct list_head		 client;
411 	struct list_head		 pending_queue;
412 	struct list_head		 active;
413 	struct list_head		 done;
414 	struct list_head		 queue;
415 	struct list_head		 prepare_queue;
416 	struct stedma40_chan_cfg	 dma_cfg;
417 	bool				 configured;
418 	struct d40_base			*base;
419 	/* Default register configurations */
420 	u32				 src_def_cfg;
421 	u32				 dst_def_cfg;
422 	struct d40_def_lcsp		 log_def;
423 	struct d40_log_lli_full		*lcpa;
424 	/* Runtime reconfiguration */
425 	dma_addr_t			runtime_addr;
426 	enum dma_transfer_direction	runtime_direction;
427 };
428 
429 /**
430  * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
431  * controller
432  *
433  * @backup: the pointer to the registers address array for backup
434  * @backup_size: the size of the registers address array for backup
435  * @realtime_en: the realtime enable register
436  * @realtime_clear: the realtime clear register
437  * @high_prio_en: the high priority enable register
438  * @high_prio_clear: the high priority clear register
439  * @interrupt_en: the interrupt enable register
440  * @interrupt_clear: the interrupt clear register
441  * @il: the pointer to struct d40_interrupt_lookup
442  * @il_size: the size of d40_interrupt_lookup array
443  * @init_reg: the pointer to the struct d40_reg_val
444  * @init_reg_size: the size of d40_reg_val array
445  */
446 struct d40_gen_dmac {
447 	u32				*backup;
448 	u32				 backup_size;
449 	u32				 realtime_en;
450 	u32				 realtime_clear;
451 	u32				 high_prio_en;
452 	u32				 high_prio_clear;
453 	u32				 interrupt_en;
454 	u32				 interrupt_clear;
455 	struct d40_interrupt_lookup	*il;
456 	u32				 il_size;
457 	struct d40_reg_val		*init_reg;
458 	u32				 init_reg_size;
459 };
460 
461 /**
462  * struct d40_base - The big global struct, one for each probe'd instance.
463  *
464  * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
465  * @execmd_lock: Lock for execute command usage since several channels share
466  * the same physical register.
467  * @dev: The device structure.
468  * @virtbase: The virtual base address of the DMA's registers.
469  * @rev: silicon revision detected.
470  * @clk: Pointer to the DMA clock structure.
471  * @phy_start: Physical memory start of the DMA registers.
472  * @phy_size: Size of the DMA register map.
473  * @irq: The IRQ number.
474  * @num_phy_chans: The number of physical channels. Read from HW. This
475  * is the number of available channels for this driver, not counting "Secure
476  * mode" allocated physical channels.
477  * @num_log_chans: The number of logical channels. Calculated from
478  * num_phy_chans.
479  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
480  * @dma_slave: dma_device channels that can only do slave transfers.
481  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
482  * @phy_chans: Room for all possible physical channels in system.
483  * @log_chans: Room for all possible logical channels in system.
484  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
485  * to log_chans entries.
486  * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
487  * to phy_chans entries.
488  * @plat_data: Pointer to provided platform_data which is the driver
489  * configuration.
490  * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
491  * @phy_res: Vector containing all physical channels.
492  * @lcla_pool: lcla pool settings and data.
493  * @lcpa_base: The virtual mapped address of LCPA.
494  * @phy_lcpa: The physical address of the LCPA.
495  * @lcpa_size: The size of the LCPA area.
496  * @desc_slab: cache for descriptors.
497  * @reg_val_backup: Here the values of some hardware registers are stored
498  * before the DMA is powered off. They are restored when the power is back on.
499  * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
500  * later
501  * @reg_val_backup_chan: Backup data for standard channel parameter registers.
502  * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
503  * @initialized: true if the dma has been initialized
504  * @gen_dmac: the struct for generic register values to represent u8500/8540
505  * DMA controller
506  */
507 struct d40_base {
508 	spinlock_t			 interrupt_lock;
509 	spinlock_t			 execmd_lock;
510 	struct device			 *dev;
511 	void __iomem			 *virtbase;
512 	u8				  rev:4;
513 	struct clk			 *clk;
514 	phys_addr_t			  phy_start;
515 	resource_size_t			  phy_size;
516 	int				  irq;
517 	int				  num_phy_chans;
518 	int				  num_log_chans;
519 	struct device_dma_parameters	  dma_parms;
520 	struct dma_device		  dma_both;
521 	struct dma_device		  dma_slave;
522 	struct dma_device		  dma_memcpy;
523 	struct d40_chan			 *phy_chans;
524 	struct d40_chan			 *log_chans;
525 	struct d40_chan			**lookup_log_chans;
526 	struct d40_chan			**lookup_phy_chans;
527 	struct stedma40_platform_data	 *plat_data;
528 	struct regulator		 *lcpa_regulator;
529 	/* Physical half channels */
530 	struct d40_phy_res		 *phy_res;
531 	struct d40_lcla_pool		  lcla_pool;
532 	void				 *lcpa_base;
533 	dma_addr_t			  phy_lcpa;
534 	resource_size_t			  lcpa_size;
535 	struct kmem_cache		 *desc_slab;
536 	u32				  reg_val_backup[BACKUP_REGS_SZ];
537 	u32				  reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
538 	u32				 *reg_val_backup_chan;
539 	u16				  gcc_pwr_off_mask;
540 	bool				  initialized;
541 	struct d40_gen_dmac		  gen_dmac;
542 };
543 
544 static struct device *chan2dev(struct d40_chan *d40c)
545 {
546 	return &d40c->chan.dev->device;
547 }
548 
549 static bool chan_is_physical(struct d40_chan *chan)
550 {
551 	return chan->log_num == D40_PHY_CHAN;
552 }
553 
554 static bool chan_is_logical(struct d40_chan *chan)
555 {
556 	return !chan_is_physical(chan);
557 }
558 
559 static void __iomem *chan_base(struct d40_chan *chan)
560 {
561 	return chan->base->virtbase + D40_DREG_PCBASE +
562 	       chan->phy_chan->num * D40_DREG_PCDELTA;
563 }
564 
565 #define d40_err(dev, format, arg...)		\
566 	dev_err(dev, "[%s] " format, __func__, ## arg)
567 
568 #define chan_err(d40c, format, arg...)		\
569 	d40_err(chan2dev(d40c), format, ## arg)
570 
571 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
572 			      int lli_len)
573 {
574 	bool is_log = chan_is_logical(d40c);
575 	u32 align;
576 	void *base;
577 
578 	if (is_log)
579 		align = sizeof(struct d40_log_lli);
580 	else
581 		align = sizeof(struct d40_phy_lli);
582 
583 	if (lli_len == 1) {
584 		base = d40d->lli_pool.pre_alloc_lli;
585 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
586 		d40d->lli_pool.base = NULL;
587 	} else {
588 		d40d->lli_pool.size = lli_len * 2 * align;
589 
590 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
591 		d40d->lli_pool.base = base;
592 
593 		if (d40d->lli_pool.base == NULL)
594 			return -ENOMEM;
595 	}
596 
597 	if (is_log) {
598 		d40d->lli_log.src = PTR_ALIGN(base, align);
599 		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
600 
601 		d40d->lli_pool.dma_addr = 0;
602 	} else {
603 		d40d->lli_phy.src = PTR_ALIGN(base, align);
604 		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
605 
606 		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
607 							 d40d->lli_phy.src,
608 							 d40d->lli_pool.size,
609 							 DMA_TO_DEVICE);
610 
611 		if (dma_mapping_error(d40c->base->dev,
612 				      d40d->lli_pool.dma_addr)) {
613 			kfree(d40d->lli_pool.base);
614 			d40d->lli_pool.base = NULL;
615 			d40d->lli_pool.dma_addr = 0;
616 			return -ENOMEM;
617 		}
618 	}
619 
620 	return 0;
621 }
622 
623 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
624 {
625 	if (d40d->lli_pool.dma_addr)
626 		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
627 				 d40d->lli_pool.size, DMA_TO_DEVICE);
628 
629 	kfree(d40d->lli_pool.base);
630 	d40d->lli_pool.base = NULL;
631 	d40d->lli_pool.size = 0;
632 	d40d->lli_log.src = NULL;
633 	d40d->lli_log.dst = NULL;
634 	d40d->lli_phy.src = NULL;
635 	d40d->lli_phy.dst = NULL;
636 }
637 
638 static int d40_lcla_alloc_one(struct d40_chan *d40c,
639 			      struct d40_desc *d40d)
640 {
641 	unsigned long flags;
642 	int i;
643 	int ret = -EINVAL;
644 
645 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
646 
647 	/*
648 	 * Allocate both src and dst at the same time; therefore the halves
649 	 * start at 1, since 0 can't be used as it is the end marker.
650 	 */
651 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
652 		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
653 
654 		if (!d40c->base->lcla_pool.alloc_map[idx]) {
655 			d40c->base->lcla_pool.alloc_map[idx] = d40d;
656 			d40d->lcla_alloc++;
657 			ret = i;
658 			break;
659 		}
660 	}
661 
662 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
663 
664 	return ret;
665 }
666 
667 static int d40_lcla_free_all(struct d40_chan *d40c,
668 			     struct d40_desc *d40d)
669 {
670 	unsigned long flags;
671 	int i;
672 	int ret = -EINVAL;
673 
674 	if (chan_is_physical(d40c))
675 		return 0;
676 
677 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
678 
679 	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
680 		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
681 
682 		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
683 			d40c->base->lcla_pool.alloc_map[idx] = NULL;
684 			d40d->lcla_alloc--;
685 			if (d40d->lcla_alloc == 0) {
686 				ret = 0;
687 				break;
688 			}
689 		}
690 	}
691 
692 	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
693 
694 	return ret;
695 
696 }
697 
698 static void d40_desc_remove(struct d40_desc *d40d)
699 {
700 	list_del(&d40d->node);
701 }
702 
703 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
704 {
705 	struct d40_desc *desc = NULL;
706 
707 	if (!list_empty(&d40c->client)) {
708 		struct d40_desc *d;
709 		struct d40_desc *_d;
710 
711 		list_for_each_entry_safe(d, _d, &d40c->client, node) {
712 			if (async_tx_test_ack(&d->txd)) {
713 				d40_desc_remove(d);
714 				desc = d;
715 				memset(desc, 0, sizeof(*desc));
716 				break;
717 			}
718 		}
719 	}
720 
721 	if (!desc)
722 		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
723 
724 	if (desc)
725 		INIT_LIST_HEAD(&desc->node);
726 
727 	return desc;
728 }
729 
730 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
731 {
732 
733 	d40_pool_lli_free(d40c, d40d);
734 	d40_lcla_free_all(d40c, d40d);
735 	kmem_cache_free(d40c->base->desc_slab, d40d);
736 }
737 
738 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
739 {
740 	list_add_tail(&desc->node, &d40c->active);
741 }
742 
743 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
744 {
745 	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
746 	struct d40_phy_lli *lli_src = desc->lli_phy.src;
747 	void __iomem *base = chan_base(chan);
748 
749 	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
750 	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
751 	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
752 	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
753 
754 	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
755 	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
756 	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
757 	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
758 }
759 
760 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
761 {
762 	list_add_tail(&desc->node, &d40c->done);
763 }
764 
765 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
766 {
767 	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
768 	struct d40_log_lli_bidir *lli = &desc->lli_log;
769 	int lli_current = desc->lli_current;
770 	int lli_len = desc->lli_len;
771 	bool cyclic = desc->cyclic;
772 	int curr_lcla = -EINVAL;
773 	int first_lcla = 0;
774 	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
775 	bool linkback;
776 
777 	/*
778 	 * We may have partially running cyclic transfers, in case we didn't get
779 	 * enough LCLA entries.
780 	 */
781 	linkback = cyclic && lli_current == 0;
782 
783 	/*
784 	 * For linkback, we need one LCLA even with only one link, because we
785 	 * can't link back to the one in LCPA space
786 	 */
787 	if (linkback || (lli_len - lli_current > 1)) {
788 		/*
789 		 * If the channel is expected to use only soft_lli don't
790 		 * allocate an lcla. This is to avoid a HW issue that exists
791 		 * in some controllers during a peripheral-to-memory transfer
792 		 * that uses linked lists.
793 		 */
794 		if (!(chan->phy_chan->use_soft_lli &&
795 			chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
796 			curr_lcla = d40_lcla_alloc_one(chan, desc);
797 
798 		first_lcla = curr_lcla;
799 	}
800 
801 	/*
802 	 * For linkback, we normally load the LCPA in the loop since we need to
803 	 * link it to the second LCLA and not the first.  However, if we
804 	 * couldn't even get a first LCLA, then we have to run in LCPA and
805 	 * reload manually.
806 	 */
807 	if (!linkback || curr_lcla == -EINVAL) {
808 		unsigned int flags = 0;
809 
810 		if (curr_lcla == -EINVAL)
811 			flags |= LLI_TERM_INT;
812 
813 		d40_log_lli_lcpa_write(chan->lcpa,
814 				       &lli->dst[lli_current],
815 				       &lli->src[lli_current],
816 				       curr_lcla,
817 				       flags);
818 		lli_current++;
819 	}
820 
821 	if (curr_lcla < 0)
822 		goto out;
823 
824 	for (; lli_current < lli_len; lli_current++) {
825 		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
826 					   8 * curr_lcla * 2;
827 		struct d40_log_lli *lcla = pool->base + lcla_offset;
828 		unsigned int flags = 0;
829 		int next_lcla;
830 
831 		if (lli_current + 1 < lli_len)
832 			next_lcla = d40_lcla_alloc_one(chan, desc);
833 		else
834 			next_lcla = linkback ? first_lcla : -EINVAL;
835 
836 		if (cyclic || next_lcla == -EINVAL)
837 			flags |= LLI_TERM_INT;
838 
839 		if (linkback && curr_lcla == first_lcla) {
840 			/* First link goes in both LCPA and LCLA */
841 			d40_log_lli_lcpa_write(chan->lcpa,
842 					       &lli->dst[lli_current],
843 					       &lli->src[lli_current],
844 					       next_lcla, flags);
845 		}
846 
847 		/*
848 		 * One unused LCLA in the cyclic case if the very first
849 		 * next_lcla fails...
850 		 */
851 		d40_log_lli_lcla_write(lcla,
852 				       &lli->dst[lli_current],
853 				       &lli->src[lli_current],
854 				       next_lcla, flags);
855 
856 		/*
857 		 * Cache maintenance is not needed if lcla is
858 		 * mapped in esram
859 		 */
860 		if (!use_esram_lcla) {
861 			dma_sync_single_range_for_device(chan->base->dev,
862 						pool->dma_addr, lcla_offset,
863 						2 * sizeof(struct d40_log_lli),
864 						DMA_TO_DEVICE);
865 		}
866 		curr_lcla = next_lcla;
867 
868 		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
869 			lli_current++;
870 			break;
871 		}
872 	}
873 
874 out:
875 	desc->lli_current = lli_current;
876 }
877 
878 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
879 {
880 	if (chan_is_physical(d40c)) {
881 		d40_phy_lli_load(d40c, d40d);
882 		d40d->lli_current = d40d->lli_len;
883 	} else
884 		d40_log_lli_to_lcxa(d40c, d40d);
885 }
886 
887 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
888 {
889 	struct d40_desc *d;
890 
891 	if (list_empty(&d40c->active))
892 		return NULL;
893 
894 	d = list_first_entry(&d40c->active,
895 			     struct d40_desc,
896 			     node);
897 	return d;
898 }
899 
900 /* remove desc from current queue and add it to the pending_queue */
901 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
902 {
903 	d40_desc_remove(desc);
904 	desc->is_in_client_list = false;
905 	list_add_tail(&desc->node, &d40c->pending_queue);
906 }
907 
908 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
909 {
910 	struct d40_desc *d;
911 
912 	if (list_empty(&d40c->pending_queue))
913 		return NULL;
914 
915 	d = list_first_entry(&d40c->pending_queue,
916 			     struct d40_desc,
917 			     node);
918 	return d;
919 }
920 
921 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
922 {
923 	struct d40_desc *d;
924 
925 	if (list_empty(&d40c->queue))
926 		return NULL;
927 
928 	d = list_first_entry(&d40c->queue,
929 			     struct d40_desc,
930 			     node);
931 	return d;
932 }
933 
934 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
935 {
936 	if (list_empty(&d40c->done))
937 		return NULL;
938 
939 	return list_first_entry(&d40c->done, struct d40_desc, node);
940 }
941 
942 static int d40_psize_2_burst_size(bool is_log, int psize)
943 {
944 	if (is_log) {
945 		if (psize == STEDMA40_PSIZE_LOG_1)
946 			return 1;
947 	} else {
948 		if (psize == STEDMA40_PSIZE_PHY_1)
949 			return 1;
950 	}
951 
952 	return 2 << psize;
953 }
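/*
 * Illustrative example (not in the original source): apart from the
 * *_PSIZE_*_1 encodings, which mean a burst of a single element, the burst
 * size is simply 2 << psize. The numeric values of the STEDMA40_PSIZE_*
 * defines live in the platform header, not shown here, e.g.:
 *
 *	d40_psize_2_burst_size(false, 2);	// -> burst of 8 elements
 *	d40_psize_2_burst_size(true, 3);	// -> burst of 16 elements
 */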
954 
955 /*
956  * The DMA only supports transmitting packets up to
957  * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
958  * dma elements required to send the entire sg list.
959  */
960 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
961 {
962 	int dmalen;
963 	u32 max_w = max(data_width1, data_width2);
964 	u32 min_w = min(data_width1, data_width2);
965 	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
966 
967 	if (seg_max > STEDMA40_MAX_SEG_SIZE)
968 		seg_max -= (1 << max_w);
969 
970 	if (!IS_ALIGNED(size, 1 << max_w))
971 		return -EINVAL;
972 
973 	if (size <= seg_max)
974 		dmalen = 1;
975 	else {
976 		dmalen = size / seg_max;
977 		if (dmalen * seg_max < size)
978 			dmalen++;
979 	}
980 	return dmalen;
981 }
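/*
 * Illustrative example (not in the original source): once seg_max is
 * computed, the tail of the function is a plain ceiling division,
 * dmalen = DIV_ROUND_UP(size, seg_max); e.g. a size of 2.5 * seg_max
 * yields dmalen = 3. A size that is not aligned to the widest data width
 * is rejected with -EINVAL before that.
 */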
982 
983 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
984 			   u32 data_width1, u32 data_width2)
985 {
986 	struct scatterlist *sg;
987 	int i;
988 	int len = 0;
989 	int ret;
990 
991 	for_each_sg(sgl, sg, sg_len, i) {
992 		ret = d40_size_2_dmalen(sg_dma_len(sg),
993 					data_width1, data_width2);
994 		if (ret < 0)
995 			return ret;
996 		len += ret;
997 	}
998 	return len;
999 }
1000 
1001 
1002 #ifdef CONFIG_PM
1003 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
1004 			 u32 *regaddr, int num, bool save)
1005 {
1006 	int i;
1007 
1008 	for (i = 0; i < num; i++) {
1009 		void __iomem *addr = baseaddr + regaddr[i];
1010 
1011 		if (save)
1012 			backup[i] = readl_relaxed(addr);
1013 		else
1014 			writel_relaxed(backup[i], addr);
1015 	}
1016 }
1017 
1018 static void d40_save_restore_registers(struct d40_base *base, bool save)
1019 {
1020 	int i;
1021 
1022 	/* Save/Restore channel specific registers */
1023 	for (i = 0; i < base->num_phy_chans; i++) {
1024 		void __iomem *addr;
1025 		int idx;
1026 
1027 		if (base->phy_res[i].reserved)
1028 			continue;
1029 
1030 		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
1031 		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
1032 
1033 		dma40_backup(addr, &base->reg_val_backup_chan[idx],
1034 			     d40_backup_regs_chan,
1035 			     ARRAY_SIZE(d40_backup_regs_chan),
1036 			     save);
1037 	}
1038 
1039 	/* Save/Restore global registers */
1040 	dma40_backup(base->virtbase, base->reg_val_backup,
1041 		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
1042 		     save);
1043 
1044 	/* Save/Restore registers only existing on dma40 v3 and later */
1045 	if (base->gen_dmac.backup)
1046 		dma40_backup(base->virtbase, base->reg_val_backup_v4,
1047 			     base->gen_dmac.backup,
1048 			base->gen_dmac.backup_size,
1049 			save);
1050 }
1051 #else
1052 static void d40_save_restore_registers(struct d40_base *base, bool save)
1053 {
1054 }
1055 #endif
1056 
1057 static int __d40_execute_command_phy(struct d40_chan *d40c,
1058 				     enum d40_command command)
1059 {
1060 	u32 status;
1061 	int i;
1062 	void __iomem *active_reg;
1063 	int ret = 0;
1064 	unsigned long flags;
1065 	u32 wmask;
1066 
1067 	if (command == D40_DMA_STOP) {
1068 		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1069 		if (ret)
1070 			return ret;
1071 	}
1072 
1073 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1074 
1075 	if (d40c->phy_chan->num % 2 == 0)
1076 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1077 	else
1078 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1079 
1080 	if (command == D40_DMA_SUSPEND_REQ) {
1081 		status = (readl(active_reg) &
1082 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1083 			D40_CHAN_POS(d40c->phy_chan->num);
1084 
1085 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1086 			goto done;
1087 	}
1088 
1089 	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1090 	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1091 	       active_reg);
1092 
1093 	if (command == D40_DMA_SUSPEND_REQ) {
1094 
1095 		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1096 			status = (readl(active_reg) &
1097 				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1098 				D40_CHAN_POS(d40c->phy_chan->num);
1099 
1100 			cpu_relax();
1101 			/*
1102 			 * Reduce the number of bus accesses while
1103 			 * waiting for the DMA to suspend.
1104 			 */
1105 			udelay(3);
1106 
1107 			if (status == D40_DMA_STOP ||
1108 			    status == D40_DMA_SUSPENDED)
1109 				break;
1110 		}
1111 
1112 		if (i == D40_SUSPEND_MAX_IT) {
1113 			chan_err(d40c,
1114 				"unable to suspend the chl %d (log: %d) status %x\n",
1115 				d40c->phy_chan->num, d40c->log_num,
1116 				status);
1117 			dump_stack();
1118 			ret = -EBUSY;
1119 		}
1120 
1121 	}
1122 done:
1123 	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1124 	return ret;
1125 }
1126 
1127 static void d40_term_all(struct d40_chan *d40c)
1128 {
1129 	struct d40_desc *d40d;
1130 	struct d40_desc *_d;
1131 
1132 	/* Release completed descriptors */
1133 	while ((d40d = d40_first_done(d40c))) {
1134 		d40_desc_remove(d40d);
1135 		d40_desc_free(d40c, d40d);
1136 	}
1137 
1138 	/* Release active descriptors */
1139 	while ((d40d = d40_first_active_get(d40c))) {
1140 		d40_desc_remove(d40d);
1141 		d40_desc_free(d40c, d40d);
1142 	}
1143 
1144 	/* Release queued descriptors waiting for transfer */
1145 	while ((d40d = d40_first_queued(d40c))) {
1146 		d40_desc_remove(d40d);
1147 		d40_desc_free(d40c, d40d);
1148 	}
1149 
1150 	/* Release pending descriptors */
1151 	while ((d40d = d40_first_pending(d40c))) {
1152 		d40_desc_remove(d40d);
1153 		d40_desc_free(d40c, d40d);
1154 	}
1155 
1156 	/* Release client owned descriptors */
1157 	if (!list_empty(&d40c->client))
1158 		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1159 			d40_desc_remove(d40d);
1160 			d40_desc_free(d40c, d40d);
1161 		}
1162 
1163 	/* Release descriptors in prepare queue */
1164 	if (!list_empty(&d40c->prepare_queue))
1165 		list_for_each_entry_safe(d40d, _d,
1166 					 &d40c->prepare_queue, node) {
1167 			d40_desc_remove(d40d);
1168 			d40_desc_free(d40c, d40d);
1169 		}
1170 
1171 	d40c->pending_tx = 0;
1172 }
1173 
1174 static void __d40_config_set_event(struct d40_chan *d40c,
1175 				   enum d40_events event_type, u32 event,
1176 				   int reg)
1177 {
1178 	void __iomem *addr = chan_base(d40c) + reg;
1179 	int tries;
1180 	u32 status;
1181 
1182 	switch (event_type) {
1183 
1184 	case D40_DEACTIVATE_EVENTLINE:
1185 
1186 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1187 		       | ~D40_EVENTLINE_MASK(event), addr);
1188 		break;
1189 
1190 	case D40_SUSPEND_REQ_EVENTLINE:
1191 		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1192 			  D40_EVENTLINE_POS(event);
1193 
1194 		if (status == D40_DEACTIVATE_EVENTLINE ||
1195 		    status == D40_SUSPEND_REQ_EVENTLINE)
1196 			break;
1197 
1198 		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1199 		       | ~D40_EVENTLINE_MASK(event), addr);
1200 
1201 		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1202 
1203 			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1204 				  D40_EVENTLINE_POS(event);
1205 
1206 			cpu_relax();
1207 			/*
1208 			 * Reduce the number of bus accesses while
1209 			 * waiting for the DMA to suspend.
1210 			 */
1211 			udelay(3);
1212 
1213 			if (status == D40_DEACTIVATE_EVENTLINE)
1214 				break;
1215 		}
1216 
1217 		if (tries == D40_SUSPEND_MAX_IT) {
1218 			chan_err(d40c,
1219 				"unable to stop the event_line chl %d (log: %d)"
1220 				"status %x\n", d40c->phy_chan->num,
1221 				 d40c->log_num, status);
1222 		}
1223 		break;
1224 
1225 	case D40_ACTIVATE_EVENTLINE:
1226 	/*
1227 	 * The hardware sometimes doesn't register the enable when src and dst
1228 	 * event lines are active on the same logical channel.  Retry to ensure
1229 	 * it does.  Usually only one retry is sufficient.
1230 	 */
1231 		tries = 100;
1232 		while (--tries) {
1233 			writel((D40_ACTIVATE_EVENTLINE <<
1234 				D40_EVENTLINE_POS(event)) |
1235 				~D40_EVENTLINE_MASK(event), addr);
1236 
1237 			if (readl(addr) & D40_EVENTLINE_MASK(event))
1238 				break;
1239 		}
1240 
1241 		if (tries != 99)
1242 			dev_dbg(chan2dev(d40c),
1243 				"[%s] workaround enable S%cLNK (%d tries)\n",
1244 				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1245 				100 - tries);
1246 
1247 		WARN_ON(!tries);
1248 		break;
1249 
1250 	case D40_ROUND_EVENTLINE:
1251 		BUG();
1252 		break;
1253 
1254 	}
1255 }
1256 
1257 static void d40_config_set_event(struct d40_chan *d40c,
1258 				 enum d40_events event_type)
1259 {
1260 	/* Enable event line connected to device (or memcpy) */
1261 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
1262 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
1263 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1264 
1265 		__d40_config_set_event(d40c, event_type, event,
1266 				       D40_CHAN_REG_SSLNK);
1267 	}
1268 
1269 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
1270 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1271 
1272 		__d40_config_set_event(d40c, event_type, event,
1273 				       D40_CHAN_REG_SDLNK);
1274 	}
1275 }
1276 
1277 static u32 d40_chan_has_events(struct d40_chan *d40c)
1278 {
1279 	void __iomem *chanbase = chan_base(d40c);
1280 	u32 val;
1281 
1282 	val = readl(chanbase + D40_CHAN_REG_SSLNK);
1283 	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1284 
1285 	return val;
1286 }
1287 
1288 static int
1289 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1290 {
1291 	unsigned long flags;
1292 	int ret = 0;
1293 	u32 active_status;
1294 	void __iomem *active_reg;
1295 
1296 	if (d40c->phy_chan->num % 2 == 0)
1297 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1298 	else
1299 		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1300 
1301 
1302 	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1303 
1304 	switch (command) {
1305 	case D40_DMA_STOP:
1306 	case D40_DMA_SUSPEND_REQ:
1307 
1308 		active_status = (readl(active_reg) &
1309 				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1310 				 D40_CHAN_POS(d40c->phy_chan->num);
1311 
1312 		if (active_status == D40_DMA_RUN)
1313 			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1314 		else
1315 			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1316 
1317 		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1318 			ret = __d40_execute_command_phy(d40c, command);
1319 
1320 		break;
1321 
1322 	case D40_DMA_RUN:
1323 
1324 		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1325 		ret = __d40_execute_command_phy(d40c, command);
1326 		break;
1327 
1328 	case D40_DMA_SUSPENDED:
1329 		BUG();
1330 		break;
1331 	}
1332 
1333 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1334 	return ret;
1335 }
1336 
1337 static int d40_channel_execute_command(struct d40_chan *d40c,
1338 				       enum d40_command command)
1339 {
1340 	if (chan_is_logical(d40c))
1341 		return __d40_execute_command_log(d40c, command);
1342 	else
1343 		return __d40_execute_command_phy(d40c, command);
1344 }
1345 
1346 static u32 d40_get_prmo(struct d40_chan *d40c)
1347 {
1348 	static const unsigned int phy_map[] = {
1349 		[STEDMA40_PCHAN_BASIC_MODE]
1350 			= D40_DREG_PRMO_PCHAN_BASIC,
1351 		[STEDMA40_PCHAN_MODULO_MODE]
1352 			= D40_DREG_PRMO_PCHAN_MODULO,
1353 		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
1354 			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1355 	};
1356 	static const unsigned int log_map[] = {
1357 		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1358 			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1359 		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1360 			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1361 		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1362 			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1363 	};
1364 
1365 	if (chan_is_physical(d40c))
1366 		return phy_map[d40c->dma_cfg.mode_opt];
1367 	else
1368 		return log_map[d40c->dma_cfg.mode_opt];
1369 }
1370 
1371 static void d40_config_write(struct d40_chan *d40c)
1372 {
1373 	u32 addr_base;
1374 	u32 var;
1375 
1376 	/* Odd addresses are even addresses + 4 */
1377 	addr_base = (d40c->phy_chan->num % 2) * 4;
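	/*
	 * Illustrative note (not from the original source): an even physical
	 * channel gets addr_base = 0 and an odd one gets addr_base = 4,
	 * selecting the "even" or "odd" variant of the PRMSx/PRMOx registers
	 * written below.
	 */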
1378 	/* Setup channel mode to logical or physical */
1379 	var = ((u32)(chan_is_logical(d40c)) + 1) <<
1380 		D40_CHAN_POS(d40c->phy_chan->num);
1381 	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1382 
1383 	/* Setup operational mode option register */
1384 	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1385 
1386 	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1387 
1388 	if (chan_is_logical(d40c)) {
1389 		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1390 			   & D40_SREG_ELEM_LOG_LIDX_MASK;
1391 		void __iomem *chanbase = chan_base(d40c);
1392 
1393 		/* Set default config for CFG reg */
1394 		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1395 		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1396 
1397 		/* Set LIDX for lcla */
1398 		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1399 		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1400 
1401 		/* Clear LNK which will be used by d40_chan_has_events() */
1402 		writel(0, chanbase + D40_CHAN_REG_SSLNK);
1403 		writel(0, chanbase + D40_CHAN_REG_SDLNK);
1404 	}
1405 }
1406 
1407 static u32 d40_residue(struct d40_chan *d40c)
1408 {
1409 	u32 num_elt;
1410 
1411 	if (chan_is_logical(d40c))
1412 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1413 			>> D40_MEM_LCSP2_ECNT_POS;
1414 	else {
1415 		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1416 		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1417 			  >> D40_SREG_ELEM_PHY_ECNT_POS;
1418 	}
1419 
1420 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1421 }
1422 
1423 static bool d40_tx_is_linked(struct d40_chan *d40c)
1424 {
1425 	bool is_link;
1426 
1427 	if (chan_is_logical(d40c))
1428 		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1429 	else
1430 		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1431 			  & D40_SREG_LNK_PHYS_LNK_MASK;
1432 
1433 	return is_link;
1434 }
1435 
1436 static int d40_pause(struct d40_chan *d40c)
1437 {
1438 	int res = 0;
1439 	unsigned long flags;
1440 
1441 	if (!d40c->busy)
1442 		return 0;
1443 
1444 	pm_runtime_get_sync(d40c->base->dev);
1445 	spin_lock_irqsave(&d40c->lock, flags);
1446 
1447 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1448 
1449 	pm_runtime_mark_last_busy(d40c->base->dev);
1450 	pm_runtime_put_autosuspend(d40c->base->dev);
1451 	spin_unlock_irqrestore(&d40c->lock, flags);
1452 	return res;
1453 }
1454 
1455 static int d40_resume(struct d40_chan *d40c)
1456 {
1457 	int res = 0;
1458 	unsigned long flags;
1459 
1460 	if (!d40c->busy)
1461 		return 0;
1462 
1463 	spin_lock_irqsave(&d40c->lock, flags);
1464 	pm_runtime_get_sync(d40c->base->dev);
1465 
1466 	/* If bytes left to transfer or linked tx resume job */
1467 	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1468 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1469 
1470 	pm_runtime_mark_last_busy(d40c->base->dev);
1471 	pm_runtime_put_autosuspend(d40c->base->dev);
1472 	spin_unlock_irqrestore(&d40c->lock, flags);
1473 	return res;
1474 }
1475 
1476 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1477 {
1478 	struct d40_chan *d40c = container_of(tx->chan,
1479 					     struct d40_chan,
1480 					     chan);
1481 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1482 	unsigned long flags;
1483 	dma_cookie_t cookie;
1484 
1485 	spin_lock_irqsave(&d40c->lock, flags);
1486 	cookie = dma_cookie_assign(tx);
1487 	d40_desc_queue(d40c, d40d);
1488 	spin_unlock_irqrestore(&d40c->lock, flags);
1489 
1490 	return cookie;
1491 }
1492 
1493 static int d40_start(struct d40_chan *d40c)
1494 {
1495 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
1496 }
1497 
1498 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1499 {
1500 	struct d40_desc *d40d;
1501 	int err;
1502 
1503 	/* Start queued jobs, if any */
1504 	d40d = d40_first_queued(d40c);
1505 
1506 	if (d40d != NULL) {
1507 		if (!d40c->busy) {
1508 			d40c->busy = true;
1509 			pm_runtime_get_sync(d40c->base->dev);
1510 		}
1511 
1512 		/* Remove from queue */
1513 		d40_desc_remove(d40d);
1514 
1515 		/* Add to active queue */
1516 		d40_desc_submit(d40c, d40d);
1517 
1518 		/* Initiate DMA job */
1519 		d40_desc_load(d40c, d40d);
1520 
1521 		/* Start dma job */
1522 		err = d40_start(d40c);
1523 
1524 		if (err)
1525 			return NULL;
1526 	}
1527 
1528 	return d40d;
1529 }
1530 
1531 /* called from interrupt context */
1532 static void dma_tc_handle(struct d40_chan *d40c)
1533 {
1534 	struct d40_desc *d40d;
1535 
1536 	/* Get first active entry from list */
1537 	d40d = d40_first_active_get(d40c);
1538 
1539 	if (d40d == NULL)
1540 		return;
1541 
1542 	if (d40d->cyclic) {
1543 		/*
1544 		 * If this was a partially loaded list, we need to reload
1545 		 * it, but only when the list is completed.  We need to check
1546 		 * for done because the interrupt will hit for every link, and
1547 		 * not just the last one.
1548 		 */
1549 		if (d40d->lli_current < d40d->lli_len
1550 		    && !d40_tx_is_linked(d40c)
1551 		    && !d40_residue(d40c)) {
1552 			d40_lcla_free_all(d40c, d40d);
1553 			d40_desc_load(d40c, d40d);
1554 			(void) d40_start(d40c);
1555 
1556 			if (d40d->lli_current == d40d->lli_len)
1557 				d40d->lli_current = 0;
1558 		}
1559 	} else {
1560 		d40_lcla_free_all(d40c, d40d);
1561 
1562 		if (d40d->lli_current < d40d->lli_len) {
1563 			d40_desc_load(d40c, d40d);
1564 			/* Start dma job */
1565 			(void) d40_start(d40c);
1566 			return;
1567 		}
1568 
1569 		if (d40_queue_start(d40c) == NULL) {
1570 			d40c->busy = false;
1571 
1572 			pm_runtime_mark_last_busy(d40c->base->dev);
1573 			pm_runtime_put_autosuspend(d40c->base->dev);
1574 		}
1575 
1576 		d40_desc_remove(d40d);
1577 		d40_desc_done(d40c, d40d);
1578 	}
1579 
1580 	d40c->pending_tx++;
1581 	tasklet_schedule(&d40c->tasklet);
1582 
1583 }
1584 
1585 static void dma_tasklet(unsigned long data)
1586 {
1587 	struct d40_chan *d40c = (struct d40_chan *) data;
1588 	struct d40_desc *d40d;
1589 	unsigned long flags;
1590 	dma_async_tx_callback callback;
1591 	void *callback_param;
1592 
1593 	spin_lock_irqsave(&d40c->lock, flags);
1594 
1595 	/* Get first entry from the done list */
1596 	d40d = d40_first_done(d40c);
1597 	if (d40d == NULL) {
1598 		/* Check if we have reached here for cyclic job */
1599 		d40d = d40_first_active_get(d40c);
1600 		if (d40d == NULL || !d40d->cyclic)
1601 			goto err;
1602 	}
1603 
1604 	if (!d40d->cyclic)
1605 		dma_cookie_complete(&d40d->txd);
1606 
1607 	/*
1608 	 * If terminating a channel, pending_tx is set to zero.
1609 	 * This prevents any finished active jobs from returning to the client.
1610 	 */
1611 	if (d40c->pending_tx == 0) {
1612 		spin_unlock_irqrestore(&d40c->lock, flags);
1613 		return;
1614 	}
1615 
1616 	/* Callback to client */
1617 	callback = d40d->txd.callback;
1618 	callback_param = d40d->txd.callback_param;
1619 
1620 	if (!d40d->cyclic) {
1621 		if (async_tx_test_ack(&d40d->txd)) {
1622 			d40_desc_remove(d40d);
1623 			d40_desc_free(d40c, d40d);
1624 		} else if (!d40d->is_in_client_list) {
1625 			d40_desc_remove(d40d);
1626 			d40_lcla_free_all(d40c, d40d);
1627 			list_add_tail(&d40d->node, &d40c->client);
1628 			d40d->is_in_client_list = true;
1629 		}
1630 	}
1631 
1632 	d40c->pending_tx--;
1633 
1634 	if (d40c->pending_tx)
1635 		tasklet_schedule(&d40c->tasklet);
1636 
1637 	spin_unlock_irqrestore(&d40c->lock, flags);
1638 
1639 	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1640 		callback(callback_param);
1641 
1642 	return;
1643 
1644 err:
1645 	/* Rescue maneuver if receiving double interrupts */
1646 	if (d40c->pending_tx > 0)
1647 		d40c->pending_tx--;
1648 	spin_unlock_irqrestore(&d40c->lock, flags);
1649 }
1650 
1651 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1652 {
1653 	int i;
1654 	u32 idx;
1655 	u32 row;
1656 	long chan = -1;
1657 	struct d40_chan *d40c;
1658 	unsigned long flags;
1659 	struct d40_base *base = data;
1660 	u32 regs[base->gen_dmac.il_size];
1661 	struct d40_interrupt_lookup *il = base->gen_dmac.il;
1662 	u32 il_size = base->gen_dmac.il_size;
1663 
1664 	spin_lock_irqsave(&base->interrupt_lock, flags);
1665 
1666 	/* Read interrupt status of both logical and physical channels */
1667 	for (i = 0; i < il_size; i++)
1668 		regs[i] = readl(base->virtbase + il[i].src);
1669 
1670 	for (;;) {
1671 
1672 		chan = find_next_bit((unsigned long *)regs,
1673 				     BITS_PER_LONG * il_size, chan + 1);
1674 
1675 		/* No more set bits found? */
1676 		if (chan == BITS_PER_LONG * il_size)
1677 			break;
1678 
1679 		row = chan / BITS_PER_LONG;
1680 		idx = chan & (BITS_PER_LONG - 1);
1681 
1682 		if (il[row].offset == D40_PHY_CHAN)
1683 			d40c = base->lookup_phy_chans[idx];
1684 		else
1685 			d40c = base->lookup_log_chans[il[row].offset + idx];
1686 
1687 		if (!d40c) {
1688 			/*
1689 			 * No error because this can happen if something else
1690 			 * in the system is using the channel.
1691 			 */
1692 			continue;
1693 		}
1694 
1695 		/* ACK interrupt */
1696 		writel(1 << idx, base->virtbase + il[row].clr);
1697 
1698 		spin_lock(&d40c->lock);
1699 
1700 		if (!il[row].is_error)
1701 			dma_tc_handle(d40c);
1702 		else
1703 			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1704 				chan, il[row].offset, idx);
1705 
1706 		spin_unlock(&d40c->lock);
1707 	}
1708 
1709 	spin_unlock_irqrestore(&base->interrupt_lock, flags);
1710 
1711 	return IRQ_HANDLED;
1712 }
1713 
1714 static int d40_validate_conf(struct d40_chan *d40c,
1715 			     struct stedma40_chan_cfg *conf)
1716 {
1717 	int res = 0;
1718 	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
1719 	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
1720 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1721 
1722 	if (!conf->dir) {
1723 		chan_err(d40c, "Invalid direction.\n");
1724 		res = -EINVAL;
1725 	}
1726 
1727 	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
1728 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1729 	    d40c->runtime_addr == 0) {
1730 
1731 		chan_err(d40c, "Invalid TX channel address (%d)\n",
1732 			 conf->dst_dev_type);
1733 		res = -EINVAL;
1734 	}
1735 
1736 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1737 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1738 	    d40c->runtime_addr == 0) {
1739 		chan_err(d40c, "Invalid RX channel address (%d)\n",
1740 			conf->src_dev_type);
1741 		res = -EINVAL;
1742 	}
1743 
1744 	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1745 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1746 		chan_err(d40c, "Invalid dst\n");
1747 		res = -EINVAL;
1748 	}
1749 
1750 	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1751 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1752 		chan_err(d40c, "Invalid src\n");
1753 		res = -EINVAL;
1754 	}
1755 
1756 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1757 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1758 		chan_err(d40c, "No event line\n");
1759 		res = -EINVAL;
1760 	}
1761 
1762 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1763 	    (src_event_group != dst_event_group)) {
1764 		chan_err(d40c, "Invalid event group\n");
1765 		res = -EINVAL;
1766 	}
1767 
1768 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1769 		/*
1770 		 * The DMAC hardware supports it; support will be added to
1771 		 * this driver if any DMA client requires it.
1772 		 */
1773 		chan_err(d40c, "periph to periph not supported\n");
1774 		res = -EINVAL;
1775 	}
1776 
1777 	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1778 	    (1 << conf->src_info.data_width) !=
1779 	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1780 	    (1 << conf->dst_info.data_width)) {
1781 		/*
1782 		 * The DMAC hardware only supports
1783 		 * src (burst x width) == dst (burst x width)
1784 		 */
1785 
1786 		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1787 		res = -EINVAL;
1788 	}
1789 
1790 	return res;
1791 }
1792 
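/*
 * Channel allocation bookkeeping: each physical channel carries two
 * allocation words, one for the source side and one for the destination
 * side. A physical-mode client claims the whole channel (both words set to
 * D40_ALLOC_PHY), while logical-mode clients only claim the bit matching
 * their event line, so several logical channels can share one physical
 * resource. *first_user tells the caller whether it is the first client on
 * this physical channel and therefore has to write the channel
 * configuration registers.
 */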
1793 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1794 			       bool is_src, int log_event_line, bool is_log,
1795 			       bool *first_user)
1796 {
1797 	unsigned long flags;
1798 	spin_lock_irqsave(&phy->lock, flags);
1799 
1800 	*first_user = ((phy->allocated_src | phy->allocated_dst)
1801 			== D40_ALLOC_FREE);
1802 
1803 	if (!is_log) {
1804 		/* Physical interrupts are masked per physical full channel */
1805 		if (phy->allocated_src == D40_ALLOC_FREE &&
1806 		    phy->allocated_dst == D40_ALLOC_FREE) {
1807 			phy->allocated_dst = D40_ALLOC_PHY;
1808 			phy->allocated_src = D40_ALLOC_PHY;
1809 			goto found;
1810 		} else
1811 			goto not_found;
1812 	}
1813 
1814 	/* Logical channel */
1815 	if (is_src) {
1816 		if (phy->allocated_src == D40_ALLOC_PHY)
1817 			goto not_found;
1818 
1819 		if (phy->allocated_src == D40_ALLOC_FREE)
1820 			phy->allocated_src = D40_ALLOC_LOG_FREE;
1821 
1822 		if (!(phy->allocated_src & (1 << log_event_line))) {
1823 			phy->allocated_src |= 1 << log_event_line;
1824 			goto found;
1825 		} else
1826 			goto not_found;
1827 	} else {
1828 		if (phy->allocated_dst == D40_ALLOC_PHY)
1829 			goto not_found;
1830 
1831 		if (phy->allocated_dst == D40_ALLOC_FREE)
1832 			phy->allocated_dst = D40_ALLOC_LOG_FREE;
1833 
1834 		if (!(phy->allocated_dst & (1 << log_event_line))) {
1835 			phy->allocated_dst |= 1 << log_event_line;
1836 			goto found;
1837 		} else
1838 			goto not_found;
1839 	}
1840 
1841 not_found:
1842 	spin_unlock_irqrestore(&phy->lock, flags);
1843 	return false;
1844 found:
1845 	spin_unlock_irqrestore(&phy->lock, flags);
1846 	return true;
1847 }
1848 
1849 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1850 			       int log_event_line)
1851 {
1852 	unsigned long flags;
1853 	bool is_free = false;
1854 
1855 	spin_lock_irqsave(&phy->lock, flags);
1856 	if (!log_event_line) {
1857 		phy->allocated_dst = D40_ALLOC_FREE;
1858 		phy->allocated_src = D40_ALLOC_FREE;
1859 		is_free = true;
1860 		goto out;
1861 	}
1862 
1863 	/* Logical channel */
1864 	if (is_src) {
1865 		phy->allocated_src &= ~(1 << log_event_line);
1866 		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1867 			phy->allocated_src = D40_ALLOC_FREE;
1868 	} else {
1869 		phy->allocated_dst &= ~(1 << log_event_line);
1870 		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1871 			phy->allocated_dst = D40_ALLOC_FREE;
1872 	}
1873 
1874 	is_free = ((phy->allocated_src | phy->allocated_dst) ==
1875 		   D40_ALLOC_FREE);
1876 
1877 out:
1878 	spin_unlock_irqrestore(&phy->lock, flags);
1879 
1880 	return is_free;
1881 }
1882 
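/*
 * Pick a physical resource for the channel. For logical channels the event
 * line also decides the logical channel number: source event lines map to
 * 2 * dev_type and destination event lines to 2 * dev_type + 1. Physical
 * channels are grouped in pairs per event group, which is why the searches
 * below step through phy_num and phy_num + 1 for every group.
 */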
1883 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1884 {
1885 	int dev_type;
1886 	int event_group;
1887 	int event_line;
1888 	struct d40_phy_res *phys;
1889 	int i;
1890 	int j;
1891 	int log_num;
1892 	int num_phy_chans;
1893 	bool is_src;
1894 	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1895 
1896 	phys = d40c->base->phy_res;
1897 	num_phy_chans = d40c->base->num_phy_chans;
1898 
1899 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1900 		dev_type = d40c->dma_cfg.src_dev_type;
1901 		log_num = 2 * dev_type;
1902 		is_src = true;
1903 	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1904 		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1905 		/* dst event lines are used for logical memcpy */
1906 		dev_type = d40c->dma_cfg.dst_dev_type;
1907 		log_num = 2 * dev_type + 1;
1908 		is_src = false;
1909 	} else
1910 		return -EINVAL;
1911 
1912 	event_group = D40_TYPE_TO_GROUP(dev_type);
1913 	event_line = D40_TYPE_TO_EVENT(dev_type);
1914 
1915 	if (!is_log) {
1916 		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1917 			/* Find physical half channel */
1918 			if (d40c->dma_cfg.use_fixed_channel) {
1919 				i = d40c->dma_cfg.phy_channel;
1920 				if (d40_alloc_mask_set(&phys[i], is_src,
1921 						       0, is_log,
1922 						       first_phy_user))
1923 					goto found_phy;
1924 			} else {
1925 				for (i = 0; i < num_phy_chans; i++) {
1926 					if (d40_alloc_mask_set(&phys[i], is_src,
1927 						       0, is_log,
1928 						       first_phy_user))
1929 						goto found_phy;
1930 				}
1931 			}
1932 		} else
1933 			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1934 				int phy_num = j  + event_group * 2;
1935 				for (i = phy_num; i < phy_num + 2; i++) {
1936 					if (d40_alloc_mask_set(&phys[i],
1937 							       is_src,
1938 							       0,
1939 							       is_log,
1940 							       first_phy_user))
1941 						goto found_phy;
1942 				}
1943 			}
1944 		return -EINVAL;
1945 found_phy:
1946 		d40c->phy_chan = &phys[i];
1947 		d40c->log_num = D40_PHY_CHAN;
1948 		goto out;
1949 	}
1950 	if (dev_type == -1)
1951 		return -EINVAL;
1952 
1953 	/* Find logical channel */
1954 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1955 		int phy_num = j + event_group * 2;
1956 
1957 		if (d40c->dma_cfg.use_fixed_channel) {
1958 			i = d40c->dma_cfg.phy_channel;
1959 
1960 			if ((i != phy_num) && (i != phy_num + 1)) {
1961 				dev_err(chan2dev(d40c),
1962 					"invalid fixed phy channel %d\n", i);
1963 				return -EINVAL;
1964 			}
1965 
1966 			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1967 					       is_log, first_phy_user))
1968 				goto found_log;
1969 
1970 			dev_err(chan2dev(d40c),
1971 				"could not allocate fixed phy channel %d\n", i);
1972 			return -EINVAL;
1973 		}
1974 
1975 		/*
1976 		 * Spread logical channels across all available physical
1977 		 * channels rather than packing every logical channel onto
1978 		 * the first available physical channel.
1979 		 */
1980 		if (is_src) {
1981 			for (i = phy_num; i < phy_num + 2; i++) {
1982 				if (d40_alloc_mask_set(&phys[i], is_src,
1983 						       event_line, is_log,
1984 						       first_phy_user))
1985 					goto found_log;
1986 			}
1987 		} else {
1988 			for (i = phy_num + 1; i >= phy_num; i--) {
1989 				if (d40_alloc_mask_set(&phys[i], is_src,
1990 						       event_line, is_log,
1991 						       first_phy_user))
1992 					goto found_log;
1993 			}
1994 		}
1995 	}
1996 	return -EINVAL;
1997 
1998 found_log:
1999 	d40c->phy_chan = &phys[i];
2000 	d40c->log_num = log_num;
2001 out:
2002 
2003 	if (is_log)
2004 		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
2005 	else
2006 		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
2007 
2008 	return 0;
2009 
2010 }
2011 
2012 static int d40_config_memcpy(struct d40_chan *d40c)
2013 {
2014 	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2015 
2016 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
2017 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
2018 		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
2019 		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
2020 			memcpy[d40c->chan.chan_id];
2021 
2022 	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
2023 		   dma_has_cap(DMA_SLAVE, cap)) {
2024 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
2025 	} else {
2026 		chan_err(d40c, "No memcpy\n");
2027 		return -EINVAL;
2028 	}
2029 
2030 	return 0;
2031 }
2032 
2033 static int d40_free_dma(struct d40_chan *d40c)
2034 {
2035 
2036 	int res = 0;
2037 	u32 event;
2038 	struct d40_phy_res *phy = d40c->phy_chan;
2039 	bool is_src;
2040 
2041 	/* Terminate all queued and active transfers */
2042 	d40_term_all(d40c);
2043 
2044 	if (phy == NULL) {
2045 		chan_err(d40c, "phy == null\n");
2046 		return -EINVAL;
2047 	}
2048 
2049 	if (phy->allocated_src == D40_ALLOC_FREE &&
2050 	    phy->allocated_dst == D40_ALLOC_FREE) {
2051 		chan_err(d40c, "channel already free\n");
2052 		return -EINVAL;
2053 	}
2054 
2055 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
2056 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
2057 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2058 		is_src = false;
2059 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
2060 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2061 		is_src = true;
2062 	} else {
2063 		chan_err(d40c, "Unknown direction\n");
2064 		return -EINVAL;
2065 	}
2066 
2067 	pm_runtime_get_sync(d40c->base->dev);
2068 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2069 	if (res) {
2070 		chan_err(d40c, "stop failed\n");
2071 		goto out;
2072 	}
2073 
2074 	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2075 
2076 	if (chan_is_logical(d40c))
2077 		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2078 	else
2079 		d40c->base->lookup_phy_chans[phy->num] = NULL;
2080 
2081 	if (d40c->busy) {
2082 		pm_runtime_mark_last_busy(d40c->base->dev);
2083 		pm_runtime_put_autosuspend(d40c->base->dev);
2084 	}
2085 
2086 	d40c->busy = false;
2087 	d40c->phy_chan = NULL;
2088 	d40c->configured = false;
2089 out:
2090 
2091 	pm_runtime_mark_last_busy(d40c->base->dev);
2092 	pm_runtime_put_autosuspend(d40c->base->dev);
2093 	return res;
2094 }
2095 
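/*
 * A channel counts as paused when the hardware reports it as suspended or
 * stopped. Physical channels expose a 2-bit status field in the
 * ACTIVE/ACTIVO registers, while logical channels are judged by the
 * event-line state in the SSLNK/SDLNK registers of the physical channel
 * they run on.
 */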
2096 static bool d40_is_paused(struct d40_chan *d40c)
2097 {
2098 	void __iomem *chanbase = chan_base(d40c);
2099 	bool is_paused = false;
2100 	unsigned long flags;
2101 	void __iomem *active_reg;
2102 	u32 status;
2103 	u32 event;
2104 
2105 	spin_lock_irqsave(&d40c->lock, flags);
2106 
2107 	if (chan_is_physical(d40c)) {
2108 		if (d40c->phy_chan->num % 2 == 0)
2109 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2110 		else
2111 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2112 
2113 		status = (readl(active_reg) &
2114 			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2115 			D40_CHAN_POS(d40c->phy_chan->num);
2116 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2117 			is_paused = true;
2118 
2119 		goto _exit;
2120 	}
2121 
2122 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
2123 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
2124 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
2125 		status = readl(chanbase + D40_CHAN_REG_SDLNK);
2126 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
2127 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
2128 		status = readl(chanbase + D40_CHAN_REG_SSLNK);
2129 	} else {
2130 		chan_err(d40c, "Unknown direction\n");
2131 		goto _exit;
2132 	}
2133 
2134 	status = (status & D40_EVENTLINE_MASK(event)) >>
2135 		D40_EVENTLINE_POS(event);
2136 
2137 	if (status != D40_DMA_RUN)
2138 		is_paused = true;
2139 _exit:
2140 	spin_unlock_irqrestore(&d40c->lock, flags);
2141 	return is_paused;
2142 
2143 }
2144 
2145 static u32 stedma40_residue(struct dma_chan *chan)
2146 {
2147 	struct d40_chan *d40c =
2148 		container_of(chan, struct d40_chan, chan);
2149 	u32 bytes_left;
2150 	unsigned long flags;
2151 
2152 	spin_lock_irqsave(&d40c->lock, flags);
2153 	bytes_left = d40_residue(d40c);
2154 	spin_unlock_irqrestore(&d40c->lock, flags);
2155 
2156 	return bytes_left;
2157 }
2158 
2159 static int
2160 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2161 		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2162 		unsigned int sg_len, dma_addr_t src_dev_addr,
2163 		dma_addr_t dst_dev_addr)
2164 {
2165 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2166 	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2167 	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2168 	int ret;
2169 
2170 	ret = d40_log_sg_to_lli(sg_src, sg_len,
2171 				src_dev_addr,
2172 				desc->lli_log.src,
2173 				chan->log_def.lcsp1,
2174 				src_info->data_width,
2175 				dst_info->data_width);
2176 
2177 	ret = d40_log_sg_to_lli(sg_dst, sg_len,
2178 				dst_dev_addr,
2179 				desc->lli_log.dst,
2180 				chan->log_def.lcsp3,
2181 				dst_info->data_width,
2182 				src_info->data_width);
2183 
2184 	return ret < 0 ? ret : 0;
2185 }
2186 
2187 static int
2188 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2189 		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2190 		unsigned int sg_len, dma_addr_t src_dev_addr,
2191 		dma_addr_t dst_dev_addr)
2192 {
2193 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2194 	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2195 	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2196 	unsigned long flags = 0;
2197 	int ret;
2198 
2199 	if (desc->cyclic)
2200 		flags |= LLI_CYCLIC | LLI_TERM_INT;
2201 
2202 	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2203 				desc->lli_phy.src,
2204 				virt_to_phys(desc->lli_phy.src),
2205 				chan->src_def_cfg,
2206 				src_info, dst_info, flags);
2207 
2208 	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2209 				desc->lli_phy.dst,
2210 				virt_to_phys(desc->lli_phy.dst),
2211 				chan->dst_def_cfg,
2212 				dst_info, src_info, flags);
2213 
2214 	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2215 				   desc->lli_pool.size, DMA_TO_DEVICE);
2216 
2217 	return ret < 0 ? ret : 0;
2218 }
2219 
2220 static struct d40_desc *
2221 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2222 	      unsigned int sg_len, unsigned long dma_flags)
2223 {
2224 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2225 	struct d40_desc *desc;
2226 	int ret;
2227 
2228 	desc = d40_desc_get(chan);
2229 	if (!desc)
2230 		return NULL;
2231 
2232 	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2233 					cfg->dst_info.data_width);
2234 	if (desc->lli_len < 0) {
2235 		chan_err(chan, "Unaligned size\n");
2236 		goto err;
2237 	}
2238 
2239 	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2240 	if (ret < 0) {
2241 		chan_err(chan, "Could not allocate lli\n");
2242 		goto err;
2243 	}
2244 
2245 	desc->lli_current = 0;
2246 	desc->txd.flags = dma_flags;
2247 	desc->txd.tx_submit = d40_tx_submit;
2248 
2249 	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2250 
2251 	return desc;
2252 
2253 err:
2254 	d40_desc_free(chan, desc);
2255 	return NULL;
2256 }
2257 
2258 static dma_addr_t
2259 d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
2260 {
2261 	struct stedma40_platform_data *plat = chan->base->plat_data;
2262 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2263 	dma_addr_t addr = 0;
2264 
2265 	if (chan->runtime_addr)
2266 		return chan->runtime_addr;
2267 
2268 	if (direction == DMA_DEV_TO_MEM)
2269 		addr = plat->dev_rx[cfg->src_dev_type];
2270 	else if (direction == DMA_MEM_TO_DEV)
2271 		addr = plat->dev_tx[cfg->dst_dev_type];
2272 
2273 	return addr;
2274 }
2275 
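/*
 * Common descriptor preparation used by the memcpy, slave_sg and cyclic
 * entry points. A transfer is treated as cyclic when the scatterlist is
 * chained back onto itself (see dma40_prep_dma_cyclic() below), and the
 * LLI chain is built either in logical channel parameter memory or as
 * physical LLIs depending on the channel mode.
 */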
2276 static struct dma_async_tx_descriptor *
2277 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2278 	    struct scatterlist *sg_dst, unsigned int sg_len,
2279 	    enum dma_transfer_direction direction, unsigned long dma_flags)
2280 {
2281 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2282 	dma_addr_t src_dev_addr = 0;
2283 	dma_addr_t dst_dev_addr = 0;
2284 	struct d40_desc *desc;
2285 	unsigned long flags;
2286 	int ret;
2287 
2288 	if (!chan->phy_chan) {
2289 		chan_err(chan, "Cannot prepare unallocated channel\n");
2290 		return NULL;
2291 	}
2292 
2293 	spin_lock_irqsave(&chan->lock, flags);
2294 
2295 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2296 	if (desc == NULL)
2297 		goto err;
2298 
2299 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2300 		desc->cyclic = true;
2301 
2302 	if (direction != DMA_TRANS_NONE) {
2303 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
2304 
2305 		if (direction == DMA_DEV_TO_MEM)
2306 			src_dev_addr = dev_addr;
2307 		else if (direction == DMA_MEM_TO_DEV)
2308 			dst_dev_addr = dev_addr;
2309 	}
2310 
2311 	if (chan_is_logical(chan))
2312 		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2313 				      sg_len, src_dev_addr, dst_dev_addr);
2314 	else
2315 		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2316 				      sg_len, src_dev_addr, dst_dev_addr);
2317 
2318 	if (ret) {
2319 		chan_err(chan, "Failed to prepare %s sg job: %d\n",
2320 			 chan_is_logical(chan) ? "log" : "phy", ret);
2321 		goto err;
2322 	}
2323 
2324 	/*
2325 	 * Add the descriptor to the prepare queue so that it can be
2326 	 * freed later in terminate_all.
2327 	 */
2328 	list_add_tail(&desc->node, &chan->prepare_queue);
2329 
2330 	spin_unlock_irqrestore(&chan->lock, flags);
2331 
2332 	return &desc->txd;
2333 
2334 err:
2335 	if (desc)
2336 		d40_desc_free(chan, desc);
2337 	spin_unlock_irqrestore(&chan->lock, flags);
2338 	return NULL;
2339 }
2340 
2341 bool stedma40_filter(struct dma_chan *chan, void *data)
2342 {
2343 	struct stedma40_chan_cfg *info = data;
2344 	struct d40_chan *d40c =
2345 		container_of(chan, struct d40_chan, chan);
2346 	int err;
2347 
2348 	if (data) {
2349 		err = d40_validate_conf(d40c, info);
2350 		if (!err)
2351 			d40c->dma_cfg = *info;
2352 	} else
2353 		err = d40_config_memcpy(d40c);
2354 
2355 	if (!err)
2356 		d40c->configured = true;
2357 
2358 	return err == 0;
2359 }
2360 EXPORT_SYMBOL(stedma40_filter);
2361 
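/*
 * Illustrative sketch (not taken from any particular client driver) of how
 * a DMA client is expected to use the filter function above together with
 * the generic dmaengine API; the field values are placeholders:
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <platform-specific event line>,
 *		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL instead of &cfg makes the filter fall back to the default
 * memcpy configuration via d40_config_memcpy().
 */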
2362 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2363 {
2364 	bool realtime = d40c->dma_cfg.realtime;
2365 	bool highprio = d40c->dma_cfg.high_priority;
2366 	u32 rtreg;
2367 	u32 event = D40_TYPE_TO_EVENT(dev_type);
2368 	u32 group = D40_TYPE_TO_GROUP(dev_type);
2369 	u32 bit = 1 << event;
2370 	u32 prioreg;
2371 	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2372 
2373 	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2374 	/*
2375 	 * Due to a hardware bug, in some cases a logical channel triggered by
2376 	 * a high priority destination event line can generate extra packet
2377 	 * transactions.
2378 	 *
2379 	 * The workaround is to not set the high priority level for the
2380 	 * destination event lines that trigger logical channels.
2381 	 */
2382 	if (!src && chan_is_logical(d40c))
2383 		highprio = false;
2384 
2385 	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2386 
2387 	/* Destination event lines are stored in the upper halfword */
2388 	if (!src)
2389 		bit <<= 16;
2390 
2391 	writel(bit, d40c->base->virtbase + prioreg + group * 4);
2392 	writel(bit, d40c->base->virtbase + rtreg + group * 4);
2393 }
2394 
2395 static void d40_set_prio_realtime(struct d40_chan *d40c)
2396 {
2397 	if (d40c->base->rev < 3)
2398 		return;
2399 
2400 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
2401 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
2402 		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
2403 
2404 	if ((d40c->dma_cfg.dir ==  STEDMA40_MEM_TO_PERIPH) ||
2405 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
2406 		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
2407 }
2408 
2409 /* DMA ENGINE functions */
2410 static int d40_alloc_chan_resources(struct dma_chan *chan)
2411 {
2412 	int err;
2413 	unsigned long flags;
2414 	struct d40_chan *d40c =
2415 		container_of(chan, struct d40_chan, chan);
2416 	bool is_free_phy;
2417 	spin_lock_irqsave(&d40c->lock, flags);
2418 
2419 	dma_cookie_init(chan);
2420 
2421 	/* If no dma configuration is set use default configuration (memcpy) */
2422 	if (!d40c->configured) {
2423 		err = d40_config_memcpy(d40c);
2424 		if (err) {
2425 			chan_err(d40c, "Failed to configure memcpy channel\n");
2426 			goto fail;
2427 		}
2428 	}
2429 
2430 	err = d40_allocate_channel(d40c, &is_free_phy);
2431 	if (err) {
2432 		chan_err(d40c, "Failed to allocate channel\n");
2433 		d40c->configured = false;
2434 		goto fail;
2435 	}
2436 
2437 	pm_runtime_get_sync(d40c->base->dev);
2438 	/* Fill in basic CFG register values */
2439 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2440 		    &d40c->dst_def_cfg, chan_is_logical(d40c));
2441 
2442 	d40_set_prio_realtime(d40c);
2443 
2444 	if (chan_is_logical(d40c)) {
2445 		d40_log_cfg(&d40c->dma_cfg,
2446 			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2447 
2448 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2449 			d40c->lcpa = d40c->base->lcpa_base +
2450 				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
2451 		else
2452 			d40c->lcpa = d40c->base->lcpa_base +
2453 				d40c->dma_cfg.dst_dev_type *
2454 				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2455 	}
2456 
2457 	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2458 		 chan_is_logical(d40c) ? "logical" : "physical",
2459 		 d40c->phy_chan->num,
2460 		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2461 
2462 
2463 	/*
2464 	 * Only write channel configuration to the DMA if the physical
2465 	 * resource is free. In case of multiple logical channels
2466 	 * on the same physical resource, only the first write is necessary.
2467 	 */
2468 	if (is_free_phy)
2469 		d40_config_write(d40c);
2470 fail:
2471 	pm_runtime_mark_last_busy(d40c->base->dev);
2472 	pm_runtime_put_autosuspend(d40c->base->dev);
2473 	spin_unlock_irqrestore(&d40c->lock, flags);
2474 	return err;
2475 }
2476 
2477 static void d40_free_chan_resources(struct dma_chan *chan)
2478 {
2479 	struct d40_chan *d40c =
2480 		container_of(chan, struct d40_chan, chan);
2481 	int err;
2482 	unsigned long flags;
2483 
2484 	if (d40c->phy_chan == NULL) {
2485 		chan_err(d40c, "Cannot free unallocated channel\n");
2486 		return;
2487 	}
2488 
2489 	spin_lock_irqsave(&d40c->lock, flags);
2490 
2491 	err = d40_free_dma(d40c);
2492 
2493 	if (err)
2494 		chan_err(d40c, "Failed to free channel\n");
2495 	spin_unlock_irqrestore(&d40c->lock, flags);
2496 }
2497 
2498 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2499 						       dma_addr_t dst,
2500 						       dma_addr_t src,
2501 						       size_t size,
2502 						       unsigned long dma_flags)
2503 {
2504 	struct scatterlist dst_sg;
2505 	struct scatterlist src_sg;
2506 
2507 	sg_init_table(&dst_sg, 1);
2508 	sg_init_table(&src_sg, 1);
2509 
2510 	sg_dma_address(&dst_sg) = dst;
2511 	sg_dma_address(&src_sg) = src;
2512 
2513 	sg_dma_len(&dst_sg) = size;
2514 	sg_dma_len(&src_sg) = size;
2515 
2516 	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2517 }
2518 
2519 static struct dma_async_tx_descriptor *
2520 d40_prep_memcpy_sg(struct dma_chan *chan,
2521 		   struct scatterlist *dst_sg, unsigned int dst_nents,
2522 		   struct scatterlist *src_sg, unsigned int src_nents,
2523 		   unsigned long dma_flags)
2524 {
2525 	if (dst_nents != src_nents)
2526 		return NULL;
2527 
2528 	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2529 }
2530 
2531 static struct dma_async_tx_descriptor *
2532 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2533 		  unsigned int sg_len, enum dma_transfer_direction direction,
2534 		  unsigned long dma_flags, void *context)
2535 {
2536 	if (!is_slave_direction(direction))
2537 		return NULL;
2538 
2539 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2540 }
2541 
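/*
 * Cyclic transfers are implemented on top of d40_prep_sg(): a temporary
 * scatterlist with one entry per period is built, and its extra last entry
 * is turned into a chain link pointing back at the first entry.
 * d40_prep_sg() detects this self-referencing list and marks the
 * descriptor as cyclic, so the LLI chain keeps looping over the buffer
 * until the channel is terminated.
 */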
2542 static struct dma_async_tx_descriptor *
2543 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2544 		     size_t buf_len, size_t period_len,
2545 		     enum dma_transfer_direction direction, unsigned long flags,
2546 		     void *context)
2547 {
2548 	unsigned int periods = buf_len / period_len;
2549 	struct dma_async_tx_descriptor *txd;
2550 	struct scatterlist *sg;
2551 	int i;
2552 
2553 	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;
2554 	for (i = 0; i < periods; i++) {
2555 		sg_dma_address(&sg[i]) = dma_addr;
2556 		sg_dma_len(&sg[i]) = period_len;
2557 		dma_addr += period_len;
2558 	}
2559 
2560 	sg[periods].offset = 0;
2561 	sg_dma_len(&sg[periods]) = 0;
2562 	sg[periods].page_link =
2563 		((unsigned long)sg | 0x01) & ~0x02;
2564 
2565 	txd = d40_prep_sg(chan, sg, sg, periods, direction,
2566 			  DMA_PREP_INTERRUPT);
2567 
2568 	kfree(sg);
2569 
2570 	return txd;
2571 }
2572 
2573 static enum dma_status d40_tx_status(struct dma_chan *chan,
2574 				     dma_cookie_t cookie,
2575 				     struct dma_tx_state *txstate)
2576 {
2577 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2578 	enum dma_status ret;
2579 
2580 	if (d40c->phy_chan == NULL) {
2581 		chan_err(d40c, "Cannot read status of unallocated channel\n");
2582 		return DMA_ERROR;
2583 	}
2584 
2585 	ret = dma_cookie_status(chan, cookie, txstate);
2586 	if (ret != DMA_SUCCESS)
2587 		dma_set_residue(txstate, stedma40_residue(chan));
2588 
2589 	if (d40_is_paused(d40c))
2590 		ret = DMA_PAUSED;
2591 
2592 	return ret;
2593 }
2594 
2595 static void d40_issue_pending(struct dma_chan *chan)
2596 {
2597 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2598 	unsigned long flags;
2599 
2600 	if (d40c->phy_chan == NULL) {
2601 		chan_err(d40c, "Channel is not allocated!\n");
2602 		return;
2603 	}
2604 
2605 	spin_lock_irqsave(&d40c->lock, flags);
2606 
2607 	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2608 
2609 	/* Busy means that queued jobs are already being processed */
2610 	if (!d40c->busy)
2611 		(void) d40_queue_start(d40c);
2612 
2613 	spin_unlock_irqrestore(&d40c->lock, flags);
2614 }
2615 
2616 static void d40_terminate_all(struct dma_chan *chan)
2617 {
2618 	unsigned long flags;
2619 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2620 	int ret;
2621 
2622 	spin_lock_irqsave(&d40c->lock, flags);
2623 
2624 	pm_runtime_get_sync(d40c->base->dev);
2625 	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2626 	if (ret)
2627 		chan_err(d40c, "Failed to stop channel\n");
2628 
2629 	d40_term_all(d40c);
2630 	pm_runtime_mark_last_busy(d40c->base->dev);
2631 	pm_runtime_put_autosuspend(d40c->base->dev);
2632 	if (d40c->busy) {
2633 		pm_runtime_mark_last_busy(d40c->base->dev);
2634 		pm_runtime_put_autosuspend(d40c->base->dev);
2635 	}
2636 	d40c->busy = false;
2637 
2638 	spin_unlock_irqrestore(&d40c->lock, flags);
2639 }
2640 
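/*
 * Translate a generic dma_slave_config bus width and burst length into the
 * controller-specific data width and packet size. Burst lengths are
 * rounded down to the nearest supported value (16, 8, 4 or 1).
 */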
2641 static int
2642 dma40_config_to_halfchannel(struct d40_chan *d40c,
2643 			    struct stedma40_half_channel_info *info,
2644 			    enum dma_slave_buswidth width,
2645 			    u32 maxburst)
2646 {
2647 	enum stedma40_periph_data_width addr_width;
2648 	int psize;
2649 
2650 	switch (width) {
2651 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
2652 		addr_width = STEDMA40_BYTE_WIDTH;
2653 		break;
2654 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
2655 		addr_width = STEDMA40_HALFWORD_WIDTH;
2656 		break;
2657 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
2658 		addr_width = STEDMA40_WORD_WIDTH;
2659 		break;
2660 	case DMA_SLAVE_BUSWIDTH_8_BYTES:
2661 		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2662 		break;
2663 	default:
2664 		dev_err(d40c->base->dev,
2665 			"illegal peripheral address width "
2666 			"requested (%d)\n",
2667 			width);
2668 		return -EINVAL;
2669 	}
2670 
2671 	if (chan_is_logical(d40c)) {
2672 		if (maxburst >= 16)
2673 			psize = STEDMA40_PSIZE_LOG_16;
2674 		else if (maxburst >= 8)
2675 			psize = STEDMA40_PSIZE_LOG_8;
2676 		else if (maxburst >= 4)
2677 			psize = STEDMA40_PSIZE_LOG_4;
2678 		else
2679 			psize = STEDMA40_PSIZE_LOG_1;
2680 	} else {
2681 		if (maxburst >= 16)
2682 			psize = STEDMA40_PSIZE_PHY_16;
2683 		else if (maxburst >= 8)
2684 			psize = STEDMA40_PSIZE_PHY_8;
2685 		else if (maxburst >= 4)
2686 			psize = STEDMA40_PSIZE_PHY_4;
2687 		else
2688 			psize = STEDMA40_PSIZE_PHY_1;
2689 	}
2690 
2691 	info->data_width = addr_width;
2692 	info->psize = psize;
2693 	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2694 
2695 	return 0;
2696 }
2697 
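/*
 * Hypothetical example of the runtime configuration a peripheral driver
 * might pass down before issuing slave transfers (the exact values depend
 * on the peripheral; this is only a sketch):
 *
 *	struct dma_slave_config conf = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = <peripheral RX FIFO address>,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 *
 * This ends up in d40_set_runtime_config() below via the DMA_SLAVE_CONFIG
 * command handled in d40_control().
 */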
2698 /* Runtime reconfiguration extension */
2699 static int d40_set_runtime_config(struct dma_chan *chan,
2700 				  struct dma_slave_config *config)
2701 {
2702 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2703 	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2704 	enum dma_slave_buswidth src_addr_width, dst_addr_width;
2705 	dma_addr_t config_addr;
2706 	u32 src_maxburst, dst_maxburst;
2707 	int ret;
2708 
2709 	src_addr_width = config->src_addr_width;
2710 	src_maxburst = config->src_maxburst;
2711 	dst_addr_width = config->dst_addr_width;
2712 	dst_maxburst = config->dst_maxburst;
2713 
2714 	if (config->direction == DMA_DEV_TO_MEM) {
2715 		dma_addr_t dev_addr_rx =
2716 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2717 
2718 		config_addr = config->src_addr;
2719 		if (dev_addr_rx)
2720 			dev_dbg(d40c->base->dev,
2721 				"channel has a pre-wired RX address %08x "
2722 				"overriding with %08x\n",
2723 				dev_addr_rx, config_addr);
2724 		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2725 			dev_dbg(d40c->base->dev,
2726 				"channel was not configured for peripheral "
2727 				"to memory transfer (%d) overriding\n",
2728 				cfg->dir);
2729 		cfg->dir = STEDMA40_PERIPH_TO_MEM;
2730 
2731 		/* Configure the memory side */
2732 		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2733 			dst_addr_width = src_addr_width;
2734 		if (dst_maxburst == 0)
2735 			dst_maxburst = src_maxburst;
2736 
2737 	} else if (config->direction == DMA_MEM_TO_DEV) {
2738 		dma_addr_t dev_addr_tx =
2739 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2740 
2741 		config_addr = config->dst_addr;
2742 		if (dev_addr_tx)
2743 			dev_dbg(d40c->base->dev,
2744 				"channel has a pre-wired TX address %08x "
2745 				"overriding with %08x\n",
2746 				dev_addr_tx, config_addr);
2747 		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2748 			dev_dbg(d40c->base->dev,
2749 				"channel was not configured for memory "
2750 				"to peripheral transfer (%d) overriding\n",
2751 				cfg->dir);
2752 		cfg->dir = STEDMA40_MEM_TO_PERIPH;
2753 
2754 		/* Configure the memory side */
2755 		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2756 			src_addr_width = dst_addr_width;
2757 		if (src_maxburst == 0)
2758 			src_maxburst = dst_maxburst;
2759 	} else {
2760 		dev_err(d40c->base->dev,
2761 			"unrecognized channel direction %d\n",
2762 			config->direction);
2763 		return -EINVAL;
2764 	}
2765 
2766 	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2767 		dev_err(d40c->base->dev,
2768 			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2769 			src_maxburst,
2770 			src_addr_width,
2771 			dst_maxburst,
2772 			dst_addr_width);
2773 		return -EINVAL;
2774 	}
2775 
2776 	if (src_maxburst > 16) {
2777 		src_maxburst = 16;
2778 		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2779 	} else if (dst_maxburst > 16) {
2780 		dst_maxburst = 16;
2781 		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2782 	}
2783 
2784 	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2785 					  src_addr_width,
2786 					  src_maxburst);
2787 	if (ret)
2788 		return ret;
2789 
2790 	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2791 					  dst_addr_width,
2792 					  dst_maxburst);
2793 	if (ret)
2794 		return ret;
2795 
2796 	/* Fill in register values */
2797 	if (chan_is_logical(d40c))
2798 		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2799 	else
2800 		d40_phy_cfg(cfg, &d40c->src_def_cfg,
2801 			    &d40c->dst_def_cfg, false);
2802 
2803 	/* These settings will take precedence later */
2804 	d40c->runtime_addr = config_addr;
2805 	d40c->runtime_direction = config->direction;
2806 	dev_dbg(d40c->base->dev,
2807 		"configured channel %s for %s, data width %d/%d, "
2808 		"maxburst %d/%d elements, LE, no flow control\n",
2809 		dma_chan_name(chan),
2810 		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2811 		src_addr_width, dst_addr_width,
2812 		src_maxburst, dst_maxburst);
2813 
2814 	return 0;
2815 }
2816 
2817 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2818 		       unsigned long arg)
2819 {
2820 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2821 
2822 	if (d40c->phy_chan == NULL) {
2823 		chan_err(d40c, "Channel is not allocated!\n");
2824 		return -EINVAL;
2825 	}
2826 
2827 	switch (cmd) {
2828 	case DMA_TERMINATE_ALL:
2829 		d40_terminate_all(chan);
2830 		return 0;
2831 	case DMA_PAUSE:
2832 		return d40_pause(d40c);
2833 	case DMA_RESUME:
2834 		return d40_resume(d40c);
2835 	case DMA_SLAVE_CONFIG:
2836 		return d40_set_runtime_config(chan,
2837 			(struct dma_slave_config *) arg);
2838 	default:
2839 		break;
2840 	}
2841 
2842 	/* Other commands are unimplemented */
2843 	return -ENXIO;
2844 }
2845 
2846 /* Initialization functions */
2847 
2848 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2849 				 struct d40_chan *chans, int offset,
2850 				 int num_chans)
2851 {
2852 	int i = 0;
2853 	struct d40_chan *d40c;
2854 
2855 	INIT_LIST_HEAD(&dma->channels);
2856 
2857 	for (i = offset; i < offset + num_chans; i++) {
2858 		d40c = &chans[i];
2859 		d40c->base = base;
2860 		d40c->chan.device = dma;
2861 
2862 		spin_lock_init(&d40c->lock);
2863 
2864 		d40c->log_num = D40_PHY_CHAN;
2865 
2866 		INIT_LIST_HEAD(&d40c->done);
2867 		INIT_LIST_HEAD(&d40c->active);
2868 		INIT_LIST_HEAD(&d40c->queue);
2869 		INIT_LIST_HEAD(&d40c->pending_queue);
2870 		INIT_LIST_HEAD(&d40c->client);
2871 		INIT_LIST_HEAD(&d40c->prepare_queue);
2872 
2873 		tasklet_init(&d40c->tasklet, dma_tasklet,
2874 			     (unsigned long) d40c);
2875 
2876 		list_add_tail(&d40c->chan.device_node,
2877 			      &dma->channels);
2878 	}
2879 }
2880 
2881 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2882 {
2883 	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2884 		dev->device_prep_slave_sg = d40_prep_slave_sg;
2885 
2886 	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2887 		dev->device_prep_dma_memcpy = d40_prep_memcpy;
2888 
2889 		/*
2890 		 * This controller can only access addresses at even
2891 		 * 32-bit boundaries, i.e. an alignment of 2^2 bytes.
2892 		 */
2893 		dev->copy_align = 2;
2894 	}
2895 
2896 	if (dma_has_cap(DMA_SG, dev->cap_mask))
2897 		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2898 
2899 	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2900 		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2901 
2902 	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2903 	dev->device_free_chan_resources = d40_free_chan_resources;
2904 	dev->device_issue_pending = d40_issue_pending;
2905 	dev->device_tx_status = d40_tx_status;
2906 	dev->device_control = d40_control;
2907 	dev->dev = base->dev;
2908 }
2909 
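/*
 * Three dma_device instances are registered with the dmaengine core: one
 * slave-capable device backed by the logical channels, one memcpy-only
 * device backed by the dedicated logical memcpy channels, and one "both"
 * device backed by the physical channels, which can do slave as well as
 * memcpy transfers.
 */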
2910 static int __init d40_dmaengine_init(struct d40_base *base,
2911 				     int num_reserved_chans)
2912 {
2913 	int err;
2914 
2915 	d40_chan_init(base, &base->dma_slave, base->log_chans,
2916 		      0, base->num_log_chans);
2917 
2918 	dma_cap_zero(base->dma_slave.cap_mask);
2919 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2920 	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2921 
2922 	d40_ops_init(base, &base->dma_slave);
2923 
2924 	err = dma_async_device_register(&base->dma_slave);
2925 
2926 	if (err) {
2927 		d40_err(base->dev, "Failed to register slave channels\n");
2928 		goto failure1;
2929 	}
2930 
2931 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2932 		      base->num_log_chans, base->plat_data->memcpy_len);
2933 
2934 	dma_cap_zero(base->dma_memcpy.cap_mask);
2935 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2936 	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2937 
2938 	d40_ops_init(base, &base->dma_memcpy);
2939 
2940 	err = dma_async_device_register(&base->dma_memcpy);
2941 
2942 	if (err) {
2943 		d40_err(base->dev,
2944 			"Failed to register memcpy only channels\n");
2945 		goto failure2;
2946 	}
2947 
2948 	d40_chan_init(base, &base->dma_both, base->phy_chans,
2949 		      0, num_reserved_chans);
2950 
2951 	dma_cap_zero(base->dma_both.cap_mask);
2952 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2953 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2954 	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2955 	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2956 
2957 	d40_ops_init(base, &base->dma_both);
2958 	err = dma_async_device_register(&base->dma_both);
2959 
2960 	if (err) {
2961 		d40_err(base->dev,
2962 			"Failed to register logical and physical capable channels\n");
2963 		goto failure3;
2964 	}
2965 	return 0;
2966 failure3:
2967 	dma_async_device_unregister(&base->dma_memcpy);
2968 failure2:
2969 	dma_async_device_unregister(&base->dma_slave);
2970 failure1:
2971 	return err;
2972 }
2973 
2974 /* Suspend resume functionality */
2975 #ifdef CONFIG_PM
2976 static int dma40_pm_suspend(struct device *dev)
2977 {
2978 	struct platform_device *pdev = to_platform_device(dev);
2979 	struct d40_base *base = platform_get_drvdata(pdev);
2980 	int ret = 0;
2981 
2982 	if (base->lcpa_regulator)
2983 		ret = regulator_disable(base->lcpa_regulator);
2984 	return ret;
2985 }
2986 
2987 static int dma40_runtime_suspend(struct device *dev)
2988 {
2989 	struct platform_device *pdev = to_platform_device(dev);
2990 	struct d40_base *base = platform_get_drvdata(pdev);
2991 
2992 	d40_save_restore_registers(base, true);
2993 
2994 	/* Don't disable/enable clocks for v1 due to HW bugs */
2995 	if (base->rev != 1)
2996 		writel_relaxed(base->gcc_pwr_off_mask,
2997 			       base->virtbase + D40_DREG_GCC);
2998 
2999 	return 0;
3000 }
3001 
3002 static int dma40_runtime_resume(struct device *dev)
3003 {
3004 	struct platform_device *pdev = to_platform_device(dev);
3005 	struct d40_base *base = platform_get_drvdata(pdev);
3006 
3007 	if (base->initialized)
3008 		d40_save_restore_registers(base, false);
3009 
3010 	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3011 		       base->virtbase + D40_DREG_GCC);
3012 	return 0;
3013 }
3014 
3015 static int dma40_resume(struct device *dev)
3016 {
3017 	struct platform_device *pdev = to_platform_device(dev);
3018 	struct d40_base *base = platform_get_drvdata(pdev);
3019 	int ret = 0;
3020 
3021 	if (base->lcpa_regulator)
3022 		ret = regulator_enable(base->lcpa_regulator);
3023 
3024 	return ret;
3025 }
3026 
3027 static const struct dev_pm_ops dma40_pm_ops = {
3028 	.suspend		= dma40_pm_suspend,
3029 	.runtime_suspend	= dma40_runtime_suspend,
3030 	.runtime_resume		= dma40_runtime_resume,
3031 	.resume			= dma40_resume,
3032 };
3033 #define DMA40_PM_OPS	(&dma40_pm_ops)
3034 #else
3035 #define DMA40_PM_OPS	NULL
3036 #endif
3037 
3038 /* Initialization functions. */
3039 
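/*
 * The security/mode state of the physical channels is read from the PRSME
 * (even channels) and PRSMO (odd channels) registers, two bits per
 * channel. Channels flagged as secure-only, or listed as disabled in the
 * platform data, are marked as permanently allocated so they are never
 * handed out, and the clocks for their event groups stay enabled in the
 * mask written at power-off (gcc_pwr_off_mask).
 */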
3040 static int __init d40_phy_res_init(struct d40_base *base)
3041 {
3042 	int i;
3043 	int num_phy_chans_avail = 0;
3044 	u32 val[2];
3045 	int odd_even_bit = -2;
3046 	int gcc = D40_DREG_GCC_ENA;
3047 
3048 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
3049 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3050 
3051 	for (i = 0; i < base->num_phy_chans; i++) {
3052 		base->phy_res[i].num = i;
3053 		odd_even_bit += 2 * ((i % 2) == 0);
3054 		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3055 			/* Mark security only channels as occupied */
3056 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3057 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3058 			base->phy_res[i].reserved = true;
3059 			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3060 						       D40_DREG_GCC_SRC);
3061 			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3062 						       D40_DREG_GCC_DST);
3063 
3064 
3065 		} else {
3066 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3067 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3068 			base->phy_res[i].reserved = false;
3069 			num_phy_chans_avail++;
3070 		}
3071 		spin_lock_init(&base->phy_res[i].lock);
3072 	}
3073 
3074 	/* Mark disabled channels as occupied */
3075 	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3076 		int chan = base->plat_data->disabled_channels[i];
3077 
3078 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3079 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3080 		base->phy_res[chan].reserved = true;
3081 		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3082 					       D40_DREG_GCC_SRC);
3083 		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3084 					       D40_DREG_GCC_DST);
3085 		num_phy_chans_avail--;
3086 	}
3087 
3088 	/* Mark soft_lli channels */
3089 	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3090 		int chan = base->plat_data->soft_lli_chans[i];
3091 
3092 		base->phy_res[chan].use_soft_lli = true;
3093 	}
3094 
3095 	dev_info(base->dev, "%d of %d physical DMA channels available\n",
3096 		 num_phy_chans_avail, base->num_phy_chans);
3097 
3098 	/* Verify settings extended vs standard */
3099 	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3100 
3101 	for (i = 0; i < base->num_phy_chans; i++) {
3102 
3103 		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3104 		    (val[0] & 0x3) != 1)
3105 			dev_info(base->dev,
3106 				 "[%s] INFO: channel %d is misconfigured (%d)\n",
3107 				 __func__, i, val[0] & 0x3);
3108 
3109 		val[0] = val[0] >> 2;
3110 	}
3111 
3112 	/*
3113 	 * To keep things simple, enable all clocks initially.
3114 	 * The clocks will be managed later, after channel allocation.
3115 	 * The clocks for the event lines on which reserved channels exist
3116 	 * are not managed here.
3117 	 */
3118 	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3119 	base->gcc_pwr_off_mask = gcc;
3120 
3121 	return num_phy_chans_avail;
3122 }
3123 
3124 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3125 {
3126 	struct stedma40_platform_data *plat_data;
3127 	struct clk *clk = NULL;
3128 	void __iomem *virtbase = NULL;
3129 	struct resource *res = NULL;
3130 	struct d40_base *base = NULL;
3131 	int num_log_chans = 0;
3132 	int num_phy_chans;
3133 	int clk_ret = -EINVAL;
3134 	int i;
3135 	u32 pid;
3136 	u32 cid;
3137 	u8 rev;
3138 
3139 	clk = clk_get(&pdev->dev, NULL);
3140 	if (IS_ERR(clk)) {
3141 		d40_err(&pdev->dev, "No matching clock found\n");
3142 		goto failure;
3143 	}
3144 
3145 	clk_ret = clk_prepare_enable(clk);
3146 	if (clk_ret) {
3147 		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3148 		goto failure;
3149 	}
3150 
3151 	/* Get IO for DMAC base address */
3152 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3153 	if (!res)
3154 		goto failure;
3155 
3156 	if (request_mem_region(res->start, resource_size(res),
3157 			       D40_NAME " I/O base") == NULL)
3158 		goto failure;
3159 
3160 	virtbase = ioremap(res->start, resource_size(res));
3161 	if (!virtbase)
3162 		goto failure;
3163 
3164 	/* This is just a regular AMBA PrimeCell ID actually */
3165 	for (pid = 0, i = 0; i < 4; i++)
3166 		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3167 			& 255) << (i * 8);
3168 	for (cid = 0, i = 0; i < 4; i++)
3169 		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3170 			& 255) << (i * 8);
3171 
3172 	if (cid != AMBA_CID) {
3173 		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3174 		goto failure;
3175 	}
3176 	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3177 		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3178 			AMBA_MANF_BITS(pid),
3179 			AMBA_VENDOR_ST);
3180 		goto failure;
3181 	}
3182 	/*
3183 	 * HW revision:
3184 	 * DB8500ed has revision 0
3185 	 * ? has revision 1
3186 	 * DB8500v1 has revision 2
3187 	 * DB8500v2 has revision 3
3188 	 * AP9540v1 has revision 4
3189 	 * DB8540v1 has revision 4
3190 	 */
3191 	rev = AMBA_REV_BITS(pid);
3192 
3193 	plat_data = pdev->dev.platform_data;
3194 
3195 	/* The number of physical channels on this HW */
3196 	if (plat_data->num_of_phy_chans)
3197 		num_phy_chans = plat_data->num_of_phy_chans;
3198 	else
3199 		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3200 
3201 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
3202 		 rev, res->start, num_phy_chans);
3203 
3204 	if (rev < 2) {
3205 		d40_err(&pdev->dev, "hardware revision: %d is not supported",
3206 			rev);
3207 		goto failure;
3208 	}
3209 
3210 	/* Count the number of logical channels in use */
3211 	for (i = 0; i < plat_data->dev_len; i++)
3212 		if (plat_data->dev_rx[i] != 0)
3213 			num_log_chans++;
3214 
3215 	for (i = 0; i < plat_data->dev_len; i++)
3216 		if (plat_data->dev_tx[i] != 0)
3217 			num_log_chans++;
3218 
3219 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3220 		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
3221 		       sizeof(struct d40_chan), GFP_KERNEL);
3222 
3223 	if (base == NULL) {
3224 		d40_err(&pdev->dev, "Out of memory\n");
3225 		goto failure;
3226 	}
3227 
3228 	base->rev = rev;
3229 	base->clk = clk;
3230 	base->num_phy_chans = num_phy_chans;
3231 	base->num_log_chans = num_log_chans;
3232 	base->phy_start = res->start;
3233 	base->phy_size = resource_size(res);
3234 	base->virtbase = virtbase;
3235 	base->plat_data = plat_data;
3236 	base->dev = &pdev->dev;
3237 	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3238 	base->log_chans = &base->phy_chans[num_phy_chans];
3239 
3240 	if (base->plat_data->num_of_phy_chans == 14) {
3241 		base->gen_dmac.backup = d40_backup_regs_v4b;
3242 		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3243 		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3244 		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3245 		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3246 		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3247 		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3248 		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3249 		base->gen_dmac.il = il_v4b;
3250 		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3251 		base->gen_dmac.init_reg = dma_init_reg_v4b;
3252 		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3253 	} else {
3254 		if (base->rev >= 3) {
3255 			base->gen_dmac.backup = d40_backup_regs_v4a;
3256 			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3257 		}
3258 		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3259 		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3260 		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3261 		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3262 		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3263 		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3264 		base->gen_dmac.il = il_v4a;
3265 		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3266 		base->gen_dmac.init_reg = dma_init_reg_v4a;
3267 		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3268 	}
3269 
3270 	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
3271 				GFP_KERNEL);
3272 	if (!base->phy_res)
3273 		goto failure;
3274 
3275 	base->lookup_phy_chans = kzalloc(num_phy_chans *
3276 					 sizeof(struct d40_chan *),
3277 					 GFP_KERNEL);
3278 	if (!base->lookup_phy_chans)
3279 		goto failure;
3280 
3281 	if (num_log_chans + plat_data->memcpy_len) {
3282 		/*
3283 		 * The maximum number of logical channels equals the number
3284 		 * of event lines for all src and dst devices.
3285 		 */
3286 		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
3287 						 sizeof(struct d40_chan *),
3288 						 GFP_KERNEL);
3289 		if (!base->lookup_log_chans)
3290 			goto failure;
3291 	}
3292 
3293 	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3294 					    sizeof(d40_backup_regs_chan),
3295 					    GFP_KERNEL);
3296 	if (!base->reg_val_backup_chan)
3297 		goto failure;
3298 
3299 	base->lcla_pool.alloc_map =
3300 		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3301 			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
3302 	if (!base->lcla_pool.alloc_map)
3303 		goto failure;
3304 
3305 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3306 					    0, SLAB_HWCACHE_ALIGN,
3307 					    NULL);
3308 	if (base->desc_slab == NULL)
3309 		goto failure;
3310 
3311 	return base;
3312 
3313 failure:
3314 	if (!clk_ret)
3315 		clk_disable_unprepare(clk);
3316 	if (!IS_ERR(clk))
3317 		clk_put(clk);
3318 	if (virtbase)
3319 		iounmap(virtbase);
3320 	if (res)
3321 		release_mem_region(res->start,
3322 				   resource_size(res));
3325 
3326 	if (base) {
3327 		kfree(base->lcla_pool.alloc_map);
3328 		kfree(base->reg_val_backup_chan);
3329 		kfree(base->lookup_log_chans);
3330 		kfree(base->lookup_phy_chans);
3331 		kfree(base->phy_res);
3332 		kfree(base);
3333 	}
3334 
3335 	return NULL;
3336 }
3337 
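/*
 * Program the static hardware setup: the version-specific init registers
 * are written first, then the loop below packs two bits of mode/activity
 * state per physical channel into the PRMSE/PRMSO and ACTIVE/ACTIVO
 * register pairs (walking the channels from the highest number down, since
 * the values are shifted up by two for each channel) and builds the
 * interrupt enable/clear masks for the non-reserved channels.
 */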
3338 static void __init d40_hw_init(struct d40_base *base)
3339 {
3340 
3341 	int i;
3342 	u32 prmseo[2] = {0, 0};
3343 	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3344 	u32 pcmis = 0;
3345 	u32 pcicr = 0;
3346 	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3347 	u32 reg_size = base->gen_dmac.init_reg_size;
3348 
3349 	for (i = 0; i < reg_size; i++)
3350 		writel(dma_init_reg[i].val,
3351 		       base->virtbase + dma_init_reg[i].reg);
3352 
3353 	/* Configure all our dma channels to default settings */
3354 	for (i = 0; i < base->num_phy_chans; i++) {
3355 
3356 		activeo[i % 2] = activeo[i % 2] << 2;
3357 
3358 		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3359 		    == D40_ALLOC_PHY) {
3360 			activeo[i % 2] |= 3;
3361 			continue;
3362 		}
3363 
3364 		/* Enable interrupt # */
3365 		pcmis = (pcmis << 1) | 1;
3366 
3367 		/* Clear interrupt # */
3368 		pcicr = (pcicr << 1) | 1;
3369 
3370 		/* Set channel to physical mode */
3371 		prmseo[i % 2] = prmseo[i % 2] << 2;
3372 		prmseo[i % 2] |= 1;
3373 
3374 	}
3375 
3376 	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3377 	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3378 	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3379 	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3380 
3381 	/* Write which interrupt to enable */
3382 	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3383 
3384 	/* Write which interrupt to clear */
3385 	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3386 
3387 	/* These are __initdata and cannot be accessed after init */
3388 	base->gen_dmac.init_reg = NULL;
3389 	base->gen_dmac.init_reg_size = 0;
3390 }
3391 
3392 static int __init d40_lcla_allocate(struct d40_base *base)
3393 {
3394 	struct d40_lcla_pool *pool = &base->lcla_pool;
3395 	unsigned long *page_list;
3396 	int i, j;
3397 	int ret = 0;
3398 
3399 	/*
3400 	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3401 	 * To fulfil this hardware requirement without wasting 256 KiB,
3402 	 * we allocate pages until we get an aligned one.
3403 	 */
3404 	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3405 			    GFP_KERNEL);
3406 
3407 	if (!page_list) {
3408 		ret = -ENOMEM;
3409 		goto failure;
3410 	}
3411 
3412 	/* Calculate how many pages are required */
3413 	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3414 
3415 	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3416 		page_list[i] = __get_free_pages(GFP_KERNEL,
3417 						base->lcla_pool.pages);
3418 		if (!page_list[i]) {
3419 
3420 			d40_err(base->dev, "Failed to allocate %d pages.\n",
3421 				base->lcla_pool.pages);
3422 
3423 			for (j = 0; j < i; j++)
3424 				free_pages(page_list[j], base->lcla_pool.pages);
3425 			goto failure;
3426 		}
3427 
3428 		if ((virt_to_phys((void *)page_list[i]) &
3429 		     (LCLA_ALIGNMENT - 1)) == 0)
3430 			break;
3431 	}
3432 
3433 	for (j = 0; j < i; j++)
3434 		free_pages(page_list[j], base->lcla_pool.pages);
3435 
3436 	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3437 		base->lcla_pool.base = (void *)page_list[i];
3438 	} else {
3439 		/*
3440 		 * After many attempts without finding the correct alignment,
3441 		 * fall back to allocating a big buffer instead.
3442 		 */
3443 		dev_warn(base->dev,
3444 			 "[%s] Failed to get %d pages @ 18 bit align.\n",
3445 			 __func__, base->lcla_pool.pages);
3446 		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3447 							 base->num_phy_chans +
3448 							 LCLA_ALIGNMENT,
3449 							 GFP_KERNEL);
3450 		if (!base->lcla_pool.base_unaligned) {
3451 			ret = -ENOMEM;
3452 			goto failure;
3453 		}
3454 
3455 		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3456 						 LCLA_ALIGNMENT);
3457 	}
3458 
3459 	pool->dma_addr = dma_map_single(base->dev, pool->base,
3460 					SZ_1K * base->num_phy_chans,
3461 					DMA_TO_DEVICE);
3462 	if (dma_mapping_error(base->dev, pool->dma_addr)) {
3463 		pool->dma_addr = 0;
3464 		ret = -ENOMEM;
3465 		goto failure;
3466 	}
3467 
3468 	writel(virt_to_phys(base->lcla_pool.base),
3469 	       base->virtbase + D40_DREG_LCLA);
3470 failure:
3471 	kfree(page_list);
3472 	return ret;
3473 }
3474 
3475 static int __init d40_probe(struct platform_device *pdev)
3476 {
3477 	int err;
3478 	int ret = -ENOENT;
3479 	struct d40_base *base;
3480 	struct resource *res = NULL;
3481 	int num_reserved_chans;
3482 	u32 val;
3483 
3484 	base = d40_hw_detect_init(pdev);
3485 
3486 	if (!base)
3487 		goto failure;
3488 
3489 	num_reserved_chans = d40_phy_res_init(base);
3490 
3491 	platform_set_drvdata(pdev, base);
3492 
3493 	spin_lock_init(&base->interrupt_lock);
3494 	spin_lock_init(&base->execmd_lock);
3495 
3496 	/* Get IO for logical channel parameter address */
3497 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3498 	if (!res) {
3499 		ret = -ENOENT;
3500 		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3501 		goto failure;
3502 	}
3503 	base->lcpa_size = resource_size(res);
3504 	base->phy_lcpa = res->start;
3505 
3506 	if (request_mem_region(res->start, resource_size(res),
3507 			       D40_NAME " I/O lcpa") == NULL) {
3508 		ret = -EBUSY;
3509 		d40_err(&pdev->dev,
3510 			"Failed to request LCPA region 0x%x-0x%x\n",
3511 			res->start, res->end);
3512 		goto failure;
3513 	}
3514 
3515 	/* The LCPA resides in ESRAM; warn if the controller already holds a different address. */
3516 	val = readl(base->virtbase + D40_DREG_LCPA);
3517 	if (res->start != val && val != 0) {
3518 		dev_warn(&pdev->dev,
3519 			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
3520 			 __func__, val, res->start);
3521 	} else
3522 		writel(res->start, base->virtbase + D40_DREG_LCPA);
3523 
3524 	base->lcpa_base = ioremap(res->start, resource_size(res));
3525 	if (!base->lcpa_base) {
3526 		ret = -ENOMEM;
3527 		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3528 		goto failure;
3529 	}
3530 	/* If the LCLA has to be located in ESRAM, we don't need to allocate it from system memory */
3531 	if (base->plat_data->use_esram_lcla) {
3532 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3533 							"lcla_esram");
3534 		if (!res) {
3535 			ret = -ENOENT;
3536 			d40_err(&pdev->dev,
3537 				"No \"lcla_esram\" memory resource\n");
3538 			goto failure;
3539 		}
3540 		base->lcla_pool.base = ioremap(res->start,
3541 						resource_size(res));
3542 		if (!base->lcla_pool.base) {
3543 			ret = -ENOMEM;
3544 			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3545 			goto failure;
3546 		}
3547 		writel(res->start, base->virtbase + D40_DREG_LCLA);
3548 
3549 	} else {
3550 		ret = d40_lcla_allocate(base);
3551 		if (ret) {
3552 			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3553 			goto failure;
3554 		}
3555 	}
3556 
3557 	spin_lock_init(&base->lcla_pool.lock);
3558 
3559 	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}
3560 
3561 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3562 	if (ret) {
3563 		d40_err(&pdev->dev, "Failed to request IRQ %d\n", base->irq);
3564 		goto failure;
3565 	}
3566 
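	/* Enable runtime PM with autosuspend so the controller can power down when idle. */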
3567 	pm_runtime_irq_safe(base->dev);
3568 	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3569 	pm_runtime_use_autosuspend(base->dev);
3570 	pm_runtime_enable(base->dev);
3571 	pm_runtime_resume(base->dev);
3572 
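	/*
	 * With the LCLA kept in ESRAM, hold the "lcla_esram" regulator so
	 * the ESRAM bank stays powered while the controller uses it.
	 */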
3573 	if (base->plat_data->use_esram_lcla) {
3574 
3575 		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3576 		if (IS_ERR(base->lcpa_regulator)) {
3577 			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3578 			base->lcpa_regulator = NULL;
3579 			goto failure;
3580 		}
3581 
3582 		ret = regulator_enable(base->lcpa_regulator);
3583 		if (ret) {
3584 			d40_err(&pdev->dev,
3585 				"Failed to enable lcpa_regulator\n");
3586 			regulator_put(base->lcpa_regulator);
3587 			base->lcpa_regulator = NULL;
3588 			goto failure;
3589 		}
3590 	}
3591 
3592 	base->initialized = true;
3593 	ret = d40_dmaengine_init(base, num_reserved_chans);
3594 	if (ret)
3595 		goto failure;
3596 
3597 	base->dev->dma_parms = &base->dma_parms;
3598 	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3599 	if (ret) {
3600 		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3601 		goto failure;
3602 	}
3603 
3604 	d40_hw_init(base);
3605 
3606 	dev_info(base->dev, "initialized\n");
3607 	return 0;
3608 
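	/*
	 * Error path: release whatever was set up before the failure,
	 * roughly in reverse order of initialization.
	 */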
3609 failure:
3610 	if (base) {
3611 		if (base->desc_slab)
3612 			kmem_cache_destroy(base->desc_slab);
3613 		if (base->virtbase)
3614 			iounmap(base->virtbase);
3615 
3616 		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3617 			iounmap(base->lcla_pool.base);
3618 			base->lcla_pool.base = NULL;
3619 		}
3620 
3621 		if (base->lcla_pool.dma_addr)
3622 			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3623 					 SZ_1K * base->num_phy_chans,
3624 					 DMA_TO_DEVICE);
3625 
3626 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3627 			free_pages((unsigned long)base->lcla_pool.base,
3628 				   base->lcla_pool.pages);
3629 
3630 		kfree(base->lcla_pool.base_unaligned);
3631 
3632 		if (base->phy_lcpa)
3633 			release_mem_region(base->phy_lcpa,
3634 					   base->lcpa_size);
3635 		if (base->phy_start)
3636 			release_mem_region(base->phy_start,
3637 					   base->phy_size);
3638 		if (base->clk) {
3639 			clk_disable_unprepare(base->clk);
3640 			clk_put(base->clk);
3641 		}
3642 
3643 		if (base->lcpa_regulator) {
3644 			regulator_disable(base->lcpa_regulator);
3645 			regulator_put(base->lcpa_regulator);
3646 		}
3647 
3648 		kfree(base->lcla_pool.alloc_map);
3649 		kfree(base->lookup_log_chans);
3650 		kfree(base->lookup_phy_chans);
3651 		kfree(base->phy_res);
3652 		kfree(base);
3653 	}
3654 
3655 	d40_err(&pdev->dev, "probe failed\n");
3656 	return ret;
3657 }
3658 
3659 static struct platform_driver d40_driver = {
3660 	.driver = {
3661 		.owner = THIS_MODULE,
3662 		.name  = D40_NAME,
3663 		.pm = DMA40_PM_OPS,
3664 	},
3665 };
3666 
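/*
 * Register at subsys_initcall time so the DMA engine is available before
 * ordinary device drivers are probed and request channels.
 */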
3667 static int __init stedma40_init(void)
3668 {
3669 	return platform_driver_probe(&d40_driver, d40_probe);
3670 }
3671 subsys_initcall(stedma40_init);
3672