/*
*
* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2011-2018 ARM or its affiliates
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/

#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 12, 0))
#ifndef CONFIG_SET_FS
#define CONFIG_SET_FS 1
#endif
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
#define CONFIG_THREAD_INFO_IN_TASK 1
#endif
#include <linux/thread_info.h>
#include <linux/sched.h>
#endif

#include <linux/kernel.h> /* printk() */
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <asm/types.h>
#include <asm/io.h>
#include <linux/time.h>
#include "acamera_types.h"
#include "acamera_logger.h"
#include "system_dma.h"

#define SYSTEM_DMA_TOGGLE_COUNT 2
#define SYSTEM_DMA_MAX_CHANNEL 2

#if FW_USE_SYSTEM_DMA

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <asm/dma-mapping.h>
#include <asm/cacheflush.h>

typedef struct {
    //if scatter-gather is not supported, more than one channel is needed
    struct dma_chan *dma_channel[SYSTEM_DMA_MAX_CHANNEL];
    //for sg
    struct sg_table sg_device_table[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];
    unsigned int sg_device_nents[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];

    struct sg_table sg_fwmem_table[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];
    unsigned int sg_fwmem_nents[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];

    //for flushing
    fwmem_addr_pair_t *fwmem_pair_flush[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];
    //for callback unmapping
    int32_t buff_loc;
    uint32_t direction;
    uint32_t cur_fw_ctx_id;

    //synchronization and completion
    dma_completion_callback complete_func;
    atomic_t nents_done;
    struct completion comp;

} system_dma_device_t;

#else

#include <linux/interrupt.h>

typedef struct {
    void __iomem *dev_addr;
    void *fw_addr;
    size_t size;
    void *sys_back_ptr;
} mem_addr_pair_t;

typedef struct {
    struct tasklet_struct m_task;
    mem_addr_pair_t *mem_data;
} mem_tasklet_t;


typedef struct {
    char *name;
    unsigned int sg_device_nents[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];

    unsigned int sg_fwmem_nents[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];

    mem_addr_pair_t *mem_addrs[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT];

    mem_tasklet_t task_list[FIRMWARE_CONTEXT_NUMBER][SYSTEM_DMA_TOGGLE_COUNT][SYSTEM_DMA_MAX_CHANNEL];

    int32_t buff_loc;
    uint32_t direction;
    uint32_t cur_fw_ctx_id;

    //synchronization and completion
    dma_completion_callback complete_func;
    atomic_t nents_done;
    struct completion comp;

} system_dma_device_t;

#endif

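/*
 * Typical call sequence (a sketch inferred from the API below; the variable
 * names here are illustrative only, not taken from the original sources):
 *
 *   void *dma_ctx = NULL;
 *   system_dma_init( &dma_ctx );
 *   system_dma_sg_device_setup( dma_ctx, buff_loc, dev_pairs, pairs, fw_ctx_id );
 *   system_dma_sg_fwmem_setup( dma_ctx, buff_loc, fw_pairs, pairs, fw_ctx_id );
 *   system_dma_copy_sg( dma_ctx, buff_loc, SYS_DMA_TO_DEVICE, NULL, fw_ctx_id ); //NULL callback = synchronous
 *   system_dma_destroy( dma_ctx );
 */

/* Allocate the DMA context; with FW_USE_SYSTEM_DMA, also request the
 * memcpy-capable channels from the kernel DMA engine. Returns 0 on success,
 * -1 on failure. */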
int32_t system_dma_init( void **ctx )
{
    int32_t i, result = 0;
    int32_t idx;

    if ( ctx != NULL ) {

        *ctx = system_malloc( sizeof( system_dma_device_t ) );
        system_dma_device_t *system_dma_device = (system_dma_device_t *)*ctx;

        if ( !( *ctx ) ) {
            LOG( LOG_CRIT, "No memory for ctx" );
            return -1;
        }

#if FW_USE_SYSTEM_DMA
        struct dma_chan *dma_channel = NULL;
        dma_cap_mask_t mask;
        dma_cap_zero( mask );
        dma_cap_set( DMA_MEMCPY, mask );

        for ( i = 0; i < SYSTEM_DMA_MAX_CHANNEL; i++ ) {
            dma_channel = dma_request_channel( mask, 0, NULL );
            LOG( LOG_INFO, "allocating dma" );
            if ( dma_channel != NULL ) {
                system_dma_device->dma_channel[i] = dma_channel;
            } else {
                //release any channels acquired before the failure
                while ( --i >= 0 )
                    dma_release_channel( system_dma_device->dma_channel[i] );
                kfree( *ctx );
                LOG( LOG_CRIT, "Failed to request DMA channel" );
                return -1;
            }
        }

        for ( idx = 0; idx < FIRMWARE_CONTEXT_NUMBER; idx++ ) {
            for ( i = 0; i < SYSTEM_DMA_TOGGLE_COUNT; i++ ) {
                system_dma_device->sg_device_nents[idx][i] = 0;
                system_dma_device->sg_fwmem_nents[idx][i] = 0;
                system_dma_device->fwmem_pair_flush[idx][i] = NULL; //so destroy can test it safely
            }
        }

#else
        system_dma_device->name = "TSK_DMA";
        for ( idx = 0; idx < FIRMWARE_CONTEXT_NUMBER; idx++ ) {
            for ( i = 0; i < SYSTEM_DMA_TOGGLE_COUNT; i++ ) {
                system_dma_device->sg_device_nents[idx][i] = 0;
                system_dma_device->sg_fwmem_nents[idx][i] = 0;
                system_dma_device->mem_addrs[idx][i] = 0;
            }
        }
#endif
    } else {
        result = -1;
        LOG( LOG_CRIT, "Input ctx pointer is NULL" );
    }

    return result;
}

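/* Release the DMA channels (or unmap the ioremapped device regions) and free
 * all per-context tables together with the context itself. */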
int32_t system_dma_destroy( void *ctx )
{
    int32_t i, result = 0;
    int32_t idx;

    if ( ctx != 0 ) {
        system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;

#if FW_USE_SYSTEM_DMA
        for ( i = 0; i < SYSTEM_DMA_MAX_CHANNEL; i++ ) {
            dma_release_channel( system_dma_device->dma_channel[i] );
        }

        for ( idx = 0; idx < FIRMWARE_CONTEXT_NUMBER; idx++ ) {
            for ( i = 0; i < SYSTEM_DMA_TOGGLE_COUNT; i++ ) {
                if ( system_dma_device->sg_device_nents[idx][i] )
                    sg_free_table( &system_dma_device->sg_device_table[idx][i] );

                if ( system_dma_device->sg_fwmem_nents[idx][i] )
                    sg_free_table( &system_dma_device->sg_fwmem_table[idx][i] );

                if ( system_dma_device->fwmem_pair_flush[idx][i] )
                    kfree( system_dma_device->fwmem_pair_flush[idx][i] );
            }
        }

#else
        for ( idx = 0; idx < FIRMWARE_CONTEXT_NUMBER; idx++ ) {
            for ( i = 0; i < SYSTEM_DMA_TOGGLE_COUNT; i++ ) {
                if ( system_dma_device->mem_addrs[idx][i] ) {
                    int j;
                    for ( j = 0; j < system_dma_device->sg_device_nents[idx][i]; j++ )
                        iounmap( system_dma_device->mem_addrs[idx][i][j].dev_addr );
                    kfree( system_dma_device->mem_addrs[idx][i] );
                }
            }
        }
#endif

        kfree( ctx );

    } else {
        LOG( LOG_CRIT, "Input ctx pointer is NULL" );
    }
    return result;
}

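/* Per-nent completion handler: once every nent of the current transfer has
 * completed, either invoke the user callback (async) or wake the waiter (sync). */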
static void dma_complete_func( void *ctx )
{
    LOG( LOG_DEBUG, "\nIRQ completion called" );
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;

    unsigned int nents_done = atomic_inc_return( &system_dma_device->nents_done );
    if ( nents_done >= system_dma_device->sg_device_nents[system_dma_device->cur_fw_ctx_id][system_dma_device->buff_loc] ) {
        if ( system_dma_device->complete_func ) {
            system_dma_device->complete_func( ctx );
            LOG( LOG_DEBUG, "async completed on buff:%d dir:%d", system_dma_device->buff_loc, system_dma_device->direction );
        } else {
            complete( &system_dma_device->comp );
            LOG( LOG_DEBUG, "sync completed on buff:%d dir:%d", system_dma_device->buff_loc, system_dma_device->direction );
        }
    }
}

#if FW_USE_SYSTEM_DMA
//sg from here
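/* Build the device-side scatterlist for one (fw_ctx_id, buff_loc) slot from
 * the given bus address/size pairs. */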
int32_t system_dma_sg_device_setup( void *ctx, int32_t buff_loc, dma_addr_pair_t *device_addr_pair, int32_t addr_pairs, uint32_t fw_ctx_id )
{
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;
    struct scatterlist *sg;
    int i, ret;

    if ( !system_dma_device || !device_addr_pair || !addr_pairs || buff_loc >= SYSTEM_DMA_TOGGLE_COUNT || fw_ctx_id >= FIRMWARE_CONTEXT_NUMBER )
        return -1;

    struct sg_table *table = &system_dma_device->sg_device_table[fw_ctx_id][buff_loc];

    /* Allocate the scatterlist table */
    ret = sg_alloc_table( table, addr_pairs, GFP_KERNEL );
    if ( ret ) {
        LOG( LOG_CRIT, "unable to allocate DMA table\n" );
        return ret;
    }
    system_dma_device->sg_device_nents[fw_ctx_id][buff_loc] = addr_pairs;

    /* Add the device addresses to the scatterlist */
    sg = table->sgl;
    for ( i = 0; i < addr_pairs; i++ ) {
        sg_dma_address( sg ) = device_addr_pair[i].address;
        sg_dma_len( sg ) = device_addr_pair[i].size;
        sg = sg_next( sg );
    }
    LOG( LOG_INFO, "dma device setup success %d", system_dma_device->sg_device_nents[fw_ctx_id][buff_loc] );
    return 0;
}

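/* Build the firmware-memory scatterlist for one (fw_ctx_id, buff_loc) slot and
 * keep a copy of the virtual address pairs for cache flushing before transfers. */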
int32_t system_dma_sg_fwmem_setup( void *ctx, int32_t buff_loc, fwmem_addr_pair_t *fwmem_pair, int32_t addr_pairs, uint32_t fw_ctx_id )
{
    int i, ret;
    struct scatterlist *sg;
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;

    if ( !system_dma_device || !fwmem_pair || !addr_pairs || buff_loc >= SYSTEM_DMA_TOGGLE_COUNT || fw_ctx_id >= FIRMWARE_CONTEXT_NUMBER ) {
        LOG( LOG_CRIT, "Invalid input parameters" );
        return -1;
    }

    struct sg_table *table = &system_dma_device->sg_fwmem_table[fw_ctx_id][buff_loc];
    /* Allocate the scatterlist table */
    ret = sg_alloc_table( table, addr_pairs, GFP_KERNEL );
    if ( ret ) {
        LOG( LOG_CRIT, "unable to allocate DMA table\n" );
        return ret;
    }
    system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc] = addr_pairs;
    system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc] = kmalloc( sizeof( fwmem_addr_pair_t ) * addr_pairs, GFP_KERNEL );
    if ( !system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc] ) {
        LOG( LOG_CRIT, "Failed to allocate virtual address pairs for flushing!!" );
        return -1;
    }
    for ( i = 0; i < addr_pairs; i++ ) {
        system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc][i] = fwmem_pair[i];
    }
    sg = table->sgl;
    for ( i = 0; i < addr_pairs; i++ ) {
        sg_set_buf( sg, fwmem_pair[i].address, fwmem_pair[i].size );
        sg = sg_next( sg );
    }

    LOG( LOG_INFO, "fwmem setup success %d", system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc] );

    return 0;
}

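/* Unmap the firmware-memory scatterlist on every channel; used after an
 * asynchronous transfer completes. */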
void system_dma_unmap_sg( void *ctx )
{
    if ( !ctx )
        return;
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;
    int32_t buff_loc = system_dma_device->buff_loc;
    uint32_t direction = system_dma_device->direction;
    uint32_t cur_fw_ctx_id = system_dma_device->cur_fw_ctx_id;
    int i;
    for ( i = 0; i < SYSTEM_DMA_MAX_CHANNEL; i++ ) {
        struct dma_chan *chan = system_dma_device->dma_channel[i];
        dma_unmap_sg( chan->device->dev, system_dma_device->sg_fwmem_table[cur_fw_ctx_id][buff_loc].sgl, system_dma_device->sg_fwmem_nents[cur_fw_ctx_id][buff_loc], direction );
    }
}

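/* Copy between firmware memory and the device for one (fw_ctx_id, buff_loc)
 * slot. With a completion callback the call is asynchronous; with NULL it
 * blocks until the transfer finishes. A single sg-to-sg descriptor is used
 * when the engine supports it; otherwise the code falls back to one memcpy
 * descriptor per scatterlist entry. */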
int32_t system_dma_copy_sg( void *ctx, int32_t buff_loc, uint32_t direction, dma_completion_callback complete_func, uint32_t fw_ctx_id )
{
    int32_t i, result = 0;
    if ( !ctx ) {
        LOG( LOG_ERR, "Input ctx pointer is NULL" );
        return -1;
    }

    int32_t async_dma = 0;

    if ( complete_func != NULL ) {
        async_dma = 1;
    }

    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;

    struct scatterlist *dst_sg, *src_sg;
    unsigned int dst_nents, src_nents;
    struct dma_chan *chan = system_dma_device->dma_channel[0]; //probe the first channel
    struct dma_async_tx_descriptor *tx = NULL;
    dma_cookie_t cookie;
    enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_FENCE | DMA_PREP_INTERRUPT;

    if ( direction == SYS_DMA_TO_DEVICE ) {
        dst_sg = system_dma_device->sg_device_table[fw_ctx_id][buff_loc].sgl;
        dst_nents = system_dma_device->sg_device_nents[fw_ctx_id][buff_loc];
        src_sg = system_dma_device->sg_fwmem_table[fw_ctx_id][buff_loc].sgl;
        src_nents = system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc];
        direction = DMA_TO_DEVICE;
    } else {
        src_sg = system_dma_device->sg_device_table[fw_ctx_id][buff_loc].sgl;
        src_nents = system_dma_device->sg_device_nents[fw_ctx_id][buff_loc];
        dst_sg = system_dma_device->sg_fwmem_table[fw_ctx_id][buff_loc].sgl;
        dst_nents = system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc];
        direction = DMA_FROM_DEVICE;
    }

    if ( src_nents != dst_nents || !src_nents || !dst_nents ) {
        LOG( LOG_CRIT, "Unbalanced src_nents:%d dst_nents:%d", src_nents, dst_nents );
        return -1;
    }

    //flush memory before transfer
    for ( i = 0; i < src_nents; i++ ) {
        flush_icache_range( (unsigned long)( system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc][i].address ), (unsigned long)( (uintptr_t)system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc][i].address + system_dma_device->fwmem_pair_flush[fw_ctx_id][buff_loc][i].size ) );
    }

    for ( i = 0; i < SYSTEM_DMA_MAX_CHANNEL; i++ ) {
        chan = system_dma_device->dma_channel[i];

        result = dma_map_sg( chan->device->dev, system_dma_device->sg_fwmem_table[fw_ctx_id][buff_loc].sgl, system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc], direction );
        if ( result <= 0 ) {
            LOG( LOG_CRIT, "unable to map %d", result );
            return -1;
        }
        LOG( LOG_DEBUG, "src_nents:%d src_sg:%p dst_nents:%d dst_sg:%p dma map res:%d", src_nents, src_sg, dst_nents, dst_sg, result );
    }
    system_dma_device->cur_fw_ctx_id = fw_ctx_id;
    /*
     * All buffers passed to this function should be ready and mapped
     * for DMA already, so nothing else is needed here except submitting
     * the transfer to the DMA engine for processing.
     */
    atomic_set( &system_dma_device->nents_done, 0 ); //reset the completed-nents counter
    if ( async_dma == 0 ) {
        system_dma_device->complete_func = NULL; //sync mode is not allowed to have a callback
        init_completion( &system_dma_device->comp );
    } else {
        system_dma_device->complete_func = complete_func; //called once all nents are done
    }
    system_dma_device->direction = direction;
    system_dma_device->buff_loc = buff_loc;

    if ( !chan->device->device_prep_dma_sg ) {
        LOG( LOG_DEBUG, "missing device_prep_dma_sg %p %p", chan->device->device_prep_dma_sg, chan->device->device_prep_interleaved_dma );

        //fallback: one memcpy descriptor per entry; assumes src_nents does not
        //exceed SYSTEM_DMA_MAX_CHANNEL so the channel index stays in range
        for ( i = 0; i < src_nents; i++ ) {

            struct dma_chan *chan = system_dma_device->dma_channel[i];

            dma_addr_t dst = sg_dma_address( dst_sg );
            dma_addr_t src = sg_dma_address( src_sg );
            uint32_t size_to_copy = sg_dma_len( src_sg );
            LOG( LOG_DEBUG, "src:0x%llx (%d) to dst:0x%llx (%d)", (unsigned long long)src, size_to_copy, (unsigned long long)dst, sg_dma_len( dst_sg ) );

            tx = chan->device->device_prep_dma_memcpy( chan, dst, src, size_to_copy, flags );
            if ( tx ) {
                tx->callback = dma_complete_func;
                tx->callback_param = ctx;
                cookie = tx->tx_submit( tx );
                if ( dma_submit_error( cookie ) ) {
                    LOG( LOG_CRIT, "unable to submit scatterlist DMA\n" );
                    return -ENOMEM;
                }
            } else {
                LOG( LOG_CRIT, "unable to prep scatterlist DMA\n" );
                return -ENOMEM;
            }
            dma_async_issue_pending( chan );
            dst_sg = sg_next( dst_sg );
            src_sg = sg_next( src_sg );
        }

    } else {
        chan = system_dma_device->dma_channel[0];
        /* setup the scatterlist to scatterlist transfer */
        tx = chan->device->device_prep_dma_sg( chan,
                                               dst_sg, dst_nents,
                                               src_sg, src_nents,
                                               0 );
        if ( tx ) {
            tx->callback = dma_complete_func;
            tx->callback_param = ctx;
            cookie = tx->tx_submit( tx );
            if ( dma_submit_error( cookie ) ) {
                LOG( LOG_CRIT, "unable to submit scatterlist DMA\n" );
                return -ENOMEM;
            }
        } else {
            LOG( LOG_CRIT, "unable to prep scatterlist DMA\n" );
            return -ENOMEM;
        }

        atomic_set( &system_dma_device->nents_done, dst_nents - 1 ); //a single descriptor completes once, so pre-load the counter
        dma_async_issue_pending( chan );
    }


    if ( async_dma == 0 ) {
        LOG( LOG_DEBUG, "scatterlist DMA waiting completion\n" );
        wait_for_completion( &system_dma_device->comp );
        for ( i = 0; i < SYSTEM_DMA_MAX_CHANNEL; i++ ) {
            chan = system_dma_device->dma_channel[i];
            dma_unmap_sg( chan->device->dev, system_dma_device->sg_fwmem_table[fw_ctx_id][buff_loc].sgl, system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc], direction );
        }
    }

    LOG( LOG_DEBUG, "scatterlist DMA success\n" );
    return result;
}

#else

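/* Tasklet-based fallback: instead of building a scatterlist, ioremap each
 * device region so it can later be copied with memcpy_toio()/memcpy_fromio(). */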
int32_t system_dma_sg_device_setup( void *ctx, int32_t buff_loc, dma_addr_pair_t *device_addr_pair, int32_t addr_pairs, uint32_t fw_ctx_id )
{
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;
    int i;

    if ( !system_dma_device || !device_addr_pair || !addr_pairs || buff_loc >= SYSTEM_DMA_TOGGLE_COUNT || addr_pairs > SYSTEM_DMA_MAX_CHANNEL || fw_ctx_id >= FIRMWARE_CONTEXT_NUMBER )
        return -1;

    system_dma_device->sg_device_nents[fw_ctx_id][buff_loc] = addr_pairs;
    if ( !system_dma_device->mem_addrs[fw_ctx_id][buff_loc] )
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc] = kmalloc( sizeof( mem_addr_pair_t ) * SYSTEM_DMA_MAX_CHANNEL, GFP_KERNEL );

    if ( !system_dma_device->mem_addrs[fw_ctx_id][buff_loc] ) {
        LOG( LOG_CRIT, "Failed to allocate virtual address pairs for flushing!!" );
        return -1;
    }
    for ( i = 0; i < addr_pairs; i++ ) {
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].dev_addr = ioremap( device_addr_pair[i].address, device_addr_pair[i].size );
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].size = device_addr_pair[i].size;
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].sys_back_ptr = ctx;
    }

    LOG( LOG_INFO, "dma device setup success %d", system_dma_device->sg_device_nents[fw_ctx_id][buff_loc] );
    return 0;
}

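/* Record the firmware-memory address/size pairs used by the tasklet-based copy. */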
int32_t system_dma_sg_fwmem_setup( void *ctx, int32_t buff_loc, fwmem_addr_pair_t *fwmem_pair, int32_t addr_pairs, uint32_t fw_ctx_id )
{
    int i;
    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;

    if ( !system_dma_device || !fwmem_pair || !addr_pairs || buff_loc >= SYSTEM_DMA_TOGGLE_COUNT || fw_ctx_id >= FIRMWARE_CONTEXT_NUMBER ) {
        LOG( LOG_CRIT, "Invalid input parameters" );
        return -1;
    }
    system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc] = addr_pairs;

    if ( !system_dma_device->mem_addrs[fw_ctx_id][buff_loc] )
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc] = kmalloc( sizeof( mem_addr_pair_t ) * SYSTEM_DMA_MAX_CHANNEL, GFP_KERNEL );

    if ( !system_dma_device->mem_addrs[fw_ctx_id][buff_loc] ) {
        LOG( LOG_CRIT, "Failed to allocate virtual address pairs for flushing!!" );
        return -1;
    }
    for ( i = 0; i < addr_pairs; i++ ) {
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].fw_addr = fwmem_pair[i].address;
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].size = fwmem_pair[i].size;
        system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i].sys_back_ptr = ctx;
    }

    LOG( LOG_INFO, "fwmem setup success %d", system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc] );

    return 0;
}

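/* Thin wrappers around memcpy_toio()/memcpy_fromio(); the commented-out loops
 * are a word-by-word alternative kept from the original sources. */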
static inline void system_memcpy_toio( volatile void __iomem *to, const void *from, size_t count )
{
    memcpy_toio( to, from, count );
/*
    const unsigned int *f = from;
    count /= 4;
    while (count) {
        count--;
        __raw_writel(*f, to);
        f++;
        to += 4;
    }
*/
}

static inline void system_memcpy_fromio( void *to, const volatile void __iomem *from, size_t count )
{
    memcpy_fromio( to, from, count );
/*
    unsigned int *t = to;
    count /= 4;
    while (count) {
        count--;
        *t = __raw_readl(from);
        t++;
        from += 4;
    }
*/
}

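/* Tasklet body: copy one address pair in the direction selected by
 * system_dma_copy_sg() and report completion via dma_complete_func(). */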
static void memcopy_func( unsigned long p_task )
{
    mem_tasklet_t *mem_task = (mem_tasklet_t *)p_task;
    mem_addr_pair_t *mem_addr = (mem_addr_pair_t *)mem_task->mem_data;
    system_dma_device_t *system_dma_device = (system_dma_device_t *)mem_addr->sys_back_ptr;
    void *src_mem = 0;
    void *dst_mem = 0;

    int32_t buff_loc = system_dma_device->buff_loc;
    uint32_t direction = system_dma_device->direction;

    if ( direction == SYS_DMA_TO_DEVICE ) {
        src_mem = mem_addr->fw_addr;
        dst_mem = mem_addr->dev_addr;
        system_memcpy_toio( dst_mem, src_mem, mem_addr->size );
    } else {
        dst_mem = mem_addr->fw_addr;
        src_mem = mem_addr->dev_addr;
        system_memcpy_fromio( dst_mem, src_mem, mem_addr->size );
    }

    LOG( LOG_DEBUG, "(%d:%d) d:%p s:%p l:%zu", buff_loc, direction, dst_mem, src_mem, mem_addr->size );

    dma_complete_func( mem_addr->sys_back_ptr );

    return;
}

void system_dma_unmap_sg( void *ctx )
{
    return;
}

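/* Tasklet-based equivalent of the scatterlist copy: schedule one tasklet per
 * address pair and, in synchronous mode, wait for all of them to complete. */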
int32_t system_dma_copy_sg( void *ctx, int32_t buff_loc, uint32_t direction, dma_completion_callback complete_func, uint32_t fw_ctx_id )
{
    int32_t i, result = 0;
    if ( !ctx ) {
        LOG( LOG_ERR, "Input ctx pointer is NULL" );
        return -1;
    }

    int32_t async_dma = 0;

    if ( complete_func != NULL ) {
        async_dma = 1;
    }

    system_dma_device_t *system_dma_device = (system_dma_device_t *)ctx;


    unsigned int src_nents = system_dma_device->sg_device_nents[fw_ctx_id][buff_loc];
    unsigned int dst_nents = system_dma_device->sg_fwmem_nents[fw_ctx_id][buff_loc];
    if ( src_nents != dst_nents || !src_nents || !dst_nents ) {
        LOG( LOG_CRIT, "Unbalanced src_nents:%d dst_nents:%d", src_nents, dst_nents );
        return -1;
    }

    system_dma_device->cur_fw_ctx_id = fw_ctx_id;

    atomic_set( &system_dma_device->nents_done, 0 ); //reset the completed-nents counter
    if ( async_dma == 0 ) {
        system_dma_device->complete_func = NULL; //sync mode is not allowed to have a callback
        init_completion( &system_dma_device->comp );
    } else {
        system_dma_device->complete_func = complete_func; //called once all nents are done
    }
    system_dma_device->direction = direction;
    system_dma_device->buff_loc = buff_loc;


    //one tasklet per address pair (device setup guarantees
    //src_nents <= SYSTEM_DMA_MAX_CHANNEL)
    for ( i = 0; i < src_nents; i++ ) {
        mem_tasklet_t *task = &system_dma_device->task_list[fw_ctx_id][buff_loc][i];
        task->mem_data = &( system_dma_device->mem_addrs[fw_ctx_id][buff_loc][i] );
        tasklet_init( &task->m_task, memcopy_func, (unsigned long)task );
        tasklet_schedule( &task->m_task );
    }


    if ( async_dma == 0 ) {
        LOG( LOG_DEBUG, "scatterlist DMA waiting completion\n" );
        wait_for_completion( &system_dma_device->comp );
    }

    LOG( LOG_DEBUG, "scatterlist DMA success\n" );
    return result;
}

#endif