/drivers/dma/ |
D | txx9dmac.c |
     27  static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
     29          return dc->ch_regs;
     33                  const struct txx9dmac_chan *dc)        /* in __dma_regs32() */
     35          return dc->ch_regs;
     38  #define channel64_readq(dc, name) \
     39          __raw_readq(&(__dma_regs(dc)->name))
     40  #define channel64_writeq(dc, name, val) \
     41          __raw_writeq((val), &(__dma_regs(dc)->name))
     42  #define channel64_readl(dc, name) \
     43          __raw_readl(&(__dma_regs(dc)->name))
    [all …]
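The channel64_* macros above show a common MMIO idiom: the macro takes a register *name* rather than an address, and `&regs->name` resolves to the right field of the register struct at compile time. A minimal userspace sketch of the same idiom; the register layout is invented, and plain volatile accesses stand in for __raw_readq/__raw_writeq:

#include <stdint.h>
#include <stdio.h>

/* Invented register block; only the shape of the idiom matters. */
struct cregs {
    volatile uint64_t mcr;   /* hypothetical master control register */
    volatile uint64_t sar;   /* hypothetical source address register */
};

/* The field name is a macro argument, so the offset is computed by the
 * compiler from the struct layout, with no runtime lookup. */
#define channel_readq(regs, name)        ((regs)->name)
#define channel_writeq(regs, name, val)  ((regs)->name = (val))

int main(void)
{
    struct cregs fake = { 0, 0 };

    channel_writeq(&fake, sar, 0x1000);
    printf("sar = %llx\n", (unsigned long long)channel_readq(&fake, sar));
    return 0;
}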
|
D | txx9dmac.h |
    196  static inline bool is_dmac64(const struct txx9dmac_chan *dc)
    198          return __is_dmac64(dc->ddev);
    240  static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
    242          return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0;
    245  static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
    247          dc->ccr |= TXX9_DMA_CCR_INTENT;
    255  static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
    257          dc->ccr |= TXX9_DMA_CCR_SMPCHN;
    268  static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
    273  static void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
    [all …]
|
/drivers/tty/ |
D | nozomi.c |
    369          struct nozomi *dc;                             /* member */
    526  static void nozomi_setup_memory(struct nozomi *dc)
    528          void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
    535          dc->port[PORT_MDM].dl_addr[CH_A] = offset;
    536          dc->port[PORT_MDM].dl_addr[CH_B] =
    537                  (offset += dc->config_table.dl_mdm_len1);
    538          dc->port[PORT_MDM].dl_size[CH_A] =
    539                  dc->config_table.dl_mdm_len1 - buff_offset;
    540          dc->port[PORT_MDM].dl_size[CH_B] =
    541                  dc->config_table.dl_mdm_len2 - buff_offset;
    [all …]
|
/drivers/md/ |
D | dm-delay.c |
     49          struct delay_c *dc = (struct delay_c *)data;   /* in handle_delayed_timer() */
     51          queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
     54  static void queue_timeout(struct delay_c *dc, unsigned long expires)
     56          mutex_lock(&dc->timer_lock);
     58          if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
     59                  mod_timer(&dc->delay_timer, expires);
     61          mutex_unlock(&dc->timer_lock);
     76  static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
     84          list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
    107                  queue_timeout(dc, next_expires);
    [all …]
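The queue_timeout() hits show an "earliest deadline wins" re-arm rule: the timer is only moved when it is idle or when the new expiry is sooner, so a newly delayed bio can never postpone bios that are already due. A minimal model of that rule in plain C; the struct and names are hypothetical, and the kernel version does this with timer_pending()/mod_timer() under dc->timer_lock:

#include <stdbool.h>

struct delay_timer {
    bool pending;             /* models timer_pending() */
    unsigned long expires;    /* absolute deadline, e.g. in jiffies */
};

/* Re-arm only if idle or if the new deadline is earlier; re-arming
 * with a later deadline would delay work that is already queued. */
static void queue_timeout(struct delay_timer *t, unsigned long expires)
{
    if (!t->pending || expires < t->expires) {
        t->expires = expires;
        t->pending = true;
    }
}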
|
/drivers/md/bcache/ |
D | writeback.c |
     20  static void __update_writeback_rate(struct cached_dev *dc)
     22          struct cache_set *c = dc->disk.c;
     26                  div_u64(cache_sectors * dc->writeback_percent, 100);
     28          int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
     33          int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
     34          int64_t derivative = dirty - dc->disk.sectors_dirty_last;
     38          dc->disk.sectors_dirty_last = dirty;
     42          proportional *= dc->writeback_rate_update_seconds;
     43          proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
     45          derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
    [all …]
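__update_writeback_rate() is a proportional-derivative controller: the P term is the distance between the current dirty-sector count and a target (a writeback_percent share of the cache, scaled per backing device), the D term is the change in dirty sectors since the last update. A hedged userspace sketch of the arithmetic visible in the snippet; the d_term_inverse scaling is an assumed analogue of the P term's divisor, and the clamping the real code applies is omitted:

#include <stdint.h>

struct wb_model {
    int64_t sectors_dirty_last;
    int64_t update_seconds;    /* writeback_rate_update_seconds */
    int64_t p_term_inverse;    /* writeback_rate_p_term_inverse */
    int64_t d_term_inverse;    /* assumption: analogous D divisor */
};

/* Returns the rate adjustment in sectors/second. */
static int64_t writeback_rate_delta(struct wb_model *m,
                                    int64_t dirty, int64_t target)
{
    int64_t proportional = dirty - target;
    int64_t derivative = dirty - m->sectors_dirty_last;

    m->sectors_dirty_last = dirty;          /* mirrors line 38 above */

    proportional *= m->update_seconds;      /* mirrors line 42 */
    proportional /= m->p_term_inverse;      /* mirrors line 43 */

    derivative /= m->update_seconds;        /* mirrors line 45 */
    derivative /= m->d_term_inverse;        /* assumption */

    return proportional + derivative;
}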
|
D | writeback.h |
     43  static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
     47          unsigned stripe = offset_to_stripe(&dc->disk, offset);
     50                  if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
     53                  if (nr_sectors <= dc->disk.stripe_size)
     56                  nr_sectors -= dc->disk.stripe_size;
     61  static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
     64          unsigned in_use = dc->disk.c->gc_stats.in_use;
     67              test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
     71          if (dc->partial_stripes_expensive &&
     72              bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
    [all …]
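bcache_dev_stripe_dirty() walks every stripe touched by the range [offset, offset + nr_sectors) and answers "is any of it dirty?". A sketch with plain counters instead of atomics; offset_to_stripe() is reduced to a division, which matches fixed-size stripes:

#include <stdbool.h>
#include <stdint.h>

struct bdev_model {
    unsigned stripe_size;              /* sectors per stripe */
    const int *stripe_sectors_dirty;   /* per-stripe dirty counts */
};

static bool stripe_range_dirty(const struct bdev_model *d,
                               uint64_t offset, unsigned nr_sectors)
{
    uint64_t stripe = offset / d->stripe_size;   /* offset_to_stripe() */

    for (;;) {
        if (d->stripe_sectors_dirty[stripe])
            return true;                          /* any dirty stripe wins */
        if (nr_sectors <= d->stripe_size)
            return false;                         /* range exhausted */
        nr_sectors -= d->stripe_size;
        stripe++;
    }
}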
|
D | super.c |
    202          struct cached_dev *dc = bio->bi_private;       /* in write_bdev_super_endio() */
    205          closure_put(&dc->sb_write);
    245          struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);    /* in bch_write_bdev_super_unlock() */
    247          up(&dc->sb_write_mutex);
    250  void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
    252          struct closure *cl = &dc->sb_write;
    253          struct bio *bio = &dc->sb_bio;
    255          down(&dc->sb_write_mutex);
    259          bio->bi_bdev = dc->bdev;
    261          bio->bi_private = dc;
    [all …]
|
D | request.c |
     29  static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
     31          return BDEV_CACHE_MODE(&dc->sb);
     34  static bool verify(struct cached_dev *dc, struct bio *bio)
     36          return dc->verify;
    364  static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
    366          return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
    369  static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
    371          struct cache_set *c = dc->disk.c;
    372          unsigned mode = cache_mode(dc, bio);
    377          if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
    [all …]
|
D | sysfs.c |
    111          struct cached_dev *dc = container_of(kobj, struct cached_dev,     /* in SHOW() */
    115  #define var(stat) (dc->stat)
    120                        BDEV_CACHE_MODE(&dc->sb));
    122          sysfs_printf(data_csum, "%i", dc->disk.data_csum);
    129          sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
    144                  bch_hprint(rate, dc->writeback_rate.rate << 9);
    145                  bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
    146                  bch_hprint(target, dc->writeback_rate_target << 9);
    147                  bch_hprint(proportional, dc->writeback_rate_proportional << 9);
    148                  bch_hprint(derivative, dc->writeback_rate_derivative << 9);
    [all …]
|
/drivers/gpu/drm/tegra/ |
D | dc.c |
     93  static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
     98          spin_lock_irqsave(&dc->lock, flags);
    100          tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
    101          value = tegra_dc_readl(dc, offset);
    102          tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
    104          spin_unlock_irqrestore(&dc->lock, flags);
    120  void tegra_dc_commit(struct tegra_dc *dc)
    122          tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
    123          tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
    241  static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
    [all …]
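tegra_dc_readl_active() shows a read-through-a-mux discipline: take the lock, point DC_CMD_STATE_ACCESS at the active (latched) register copy, read, restore the mux, unlock, so no other path can observe or disturb the temporary mux setting. A pthread model of the locking shape only; the mux value has no effect on the fake register file here, and READ_MUX's value is a placeholder:

#include <pthread.h>
#include <stdint.h>

#define READ_MUX 0x1   /* placeholder value */

struct dc_model {
    pthread_mutex_t lock;
    uint32_t state_access;     /* stands in for DC_CMD_STATE_ACCESS */
    uint32_t regs[256];
};

static uint32_t readl_active(struct dc_model *dc, unsigned offset)
{
    uint32_t value;

    pthread_mutex_lock(&dc->lock);

    dc->state_access = READ_MUX;   /* select the active register copy */
    value = dc->regs[offset];
    dc->state_access = 0;          /* restore the default view */

    pthread_mutex_unlock(&dc->lock);
    return value;
}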
|
D | rgb.c |
     20          struct tegra_dc *dc;                           /* member */
     80  static void tegra_dc_write_regs(struct tegra_dc *dc,
     87                  tegra_dc_writel(dc, table[i].value, table[i].offset);
    129          tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));    /* in tegra_rgb_encoder_disable() */
    130          tegra_dc_commit(rgb->dc);
    145          tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));      /* in tegra_rgb_encoder_enable() */
    148          tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
    151          value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
    154          tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
    159          tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
    [all …]
|
D | drm.h |
    160  static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
    163          writel(value, dc->regs + (offset << 2));
    166  static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
    168          return readl(dc->regs + (offset << 2));
    195  u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
    196  void tegra_dc_enable_vblank(struct tegra_dc *dc);
    197  void tegra_dc_disable_vblank(struct tegra_dc *dc);
    198  void tegra_dc_commit(struct tegra_dc *dc);
    199  int tegra_dc_state_setup_clock(struct tegra_dc *dc,
    230  int tegra_dc_rgb_probe(struct tegra_dc *dc);
    [all …]
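In drm.h the accessors add `offset << 2` to a byte-typed pointer, i.e. callers pass 32-bit word indices, not byte offsets. The same contract in a userspace model, where a uint32_t pointer makes the word indexing explicit; struct and function names are invented:

#include <stdint.h>

struct dc_regs {
    volatile uint32_t *regs;   /* stands in for the mapped register window */
};

/* offset is a word index: on the kernel's byte pointer this becomes
 * regs + (offset << 2); on a uint32_t pointer it is plain [offset]. */
static inline void dc_writel(struct dc_regs *dc, uint32_t value,
                             unsigned long offset)
{
    dc->regs[offset] = value;
}

static inline uint32_t dc_readl(struct dc_regs *dc, unsigned long offset)
{
    return dc->regs[offset];
}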
|
/drivers/isdn/hisax/ |
D | arcofi.c |
     24          del_timer(&cs->dc.isac.arcofitimer);           /* in add_arcofi_timer() */
     26          init_timer(&cs->dc.isac.arcofitimer);
     27          cs->dc.isac.arcofitimer.expires = jiffies + ((ARCOFI_TIMER_VALUE * HZ) / 1000);
     28          add_timer(&cs->dc.isac.arcofitimer);
     34          cs->dc.isac.mon_txp = 0;                       /* in send_arcofi() */
     35          cs->dc.isac.mon_txc = cs->dc.isac.arcofi_list->len;
     36          memcpy(cs->dc.isac.mon_tx, cs->dc.isac.arcofi_list->msg, cs->dc.isac.mon_txc);
     37          switch (cs->dc.isac.arcofi_bc) {
     39          case 1: cs->dc.isac.mon_tx[1] |= 0x40;
     43          cs->dc.isac.mocr &= 0x0f;
    [all …]
|
D | isac.c |
     51          switch (cs->dc.isac.ph_state) {                /* in isac_new_ph() */
    269          cs->dc.isac.ph_state = (exval >> 2) & 0xf;     /* in isac_interrupt() */
    271                  debugl1(cs, "ph_state change %x", cs->dc.isac.ph_state);
    318          if (!cs->dc.isac.mon_rx) {
    319                  cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
    320                  if (!cs->dc.isac.mon_rx) {
    323          cs->dc.isac.mocr &= 0xf0;
    324          cs->dc.isac.mocr |= 0x0a;
    325          cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
    328          cs->dc.isac.mon_rxp = 0;
    [all …]
|
D | icc.c |
     52          switch (cs->dc.icc.ph_state) {                 /* in icc_new_ph() */
    263          cs->dc.icc.ph_state = (exval >> 2) & 0xf;      /* in icc_interrupt() */
    265                  debugl1(cs, "ph_state change %x", cs->dc.icc.ph_state);
    312          if (!cs->dc.icc.mon_rx) {
    313                  if (!(cs->dc.icc.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
    316          cs->dc.icc.mocr &= 0xf0;
    317          cs->dc.icc.mocr |= 0x0a;
    318          cs->writeisac(cs, ICC_MOCR, cs->dc.icc.mocr);
    321          cs->dc.icc.mon_rxp = 0;
    323          if (cs->dc.icc.mon_rxp >= MAX_MON_FRAME) {
    [all …]
|
D | amd7930_fn.c |
    134          cs->dc.amd7930.lmr1 = command;                 /* in Amd7930_ph_command() */
    162          cs->dc.amd7930.ph_state = (lsr & 0x7) + 2;     /* in Amd7930_get_state() */
    171          … u_char index = stateHelper[cs->dc.amd7930.old_state] * 8 + stateHelper[cs->dc.amd7930.ph_state] - …     /* in Amd7930_new_ph() */
    176                  cs->dc.amd7930.ph_state, cs->dc.amd7930.old_state, message & 0x0f, index);
    178          cs->dc.amd7930.old_state = cs->dc.amd7930.ph_state;
    223          cs->dc.amd7930.old_state = 3;
    359          if (!cs->dc.amd7930.tx_xmtlen)                 /* in Amd7930_fill_Dfifo() */
    363          else len = cs->dc.amd7930.tx_xmtlen;
    391          if (!cs->dc.amd7930.tx_xmtlen) {
    393                  cs->dc.amd7930.tx_xmtlen = dtcrw;
    [all …]
|
/drivers/scsi/esas2r/ |
D | esas2r_disc.c |
    291          struct esas2r_disc_context *dc = &a->disc_ctx; /* in esas2r_disc_queue_event() */
    298          dc->disc_evt |= disc_evt;
    314          struct esas2r_disc_context *dc = &a->disc_ctx; /* in esas2r_disc_start_port() */
    326          if (dc->disc_evt) {
    352          esas2r_trace("disc_evt: %d", dc->disc_evt);
    354          dc->flags = 0;
    357                  dc->flags |= DCF_POLLED;
    359          rq->interrupt_cx = dc;
    363          if (dc->disc_evt & DCDE_DEV_SCAN) {
    364                  dc->disc_evt &= ~DCDE_DEV_SCAN;
    [all …]
|
D | esas2r_targdb.c |
    121                                  dc)                    /* in esas2r_targ_db_add_raid() */
    127          if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
    133          t = a->targetdb + dc->curr_virt_id;
    140          esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
    145          if (dc->interleave == 0
    146              || dc->block_size == 0) {
    156          t->block_size = dc->block_size;
    157          t->inter_byte = dc->interleave;
    158          t->inter_block = dc->interleave / dc->block_size;
    159          t->virt_targ_id = dc->curr_virt_id;
    [all …]
|
/drivers/clk/mvebu/ |
D | dove-divider.c |
     50  static unsigned int dove_get_divider(struct dove_clk *dc)
     55          val = readl_relaxed(dc->base + DIV_CTRL0);
     56          val >>= dc->div_bit_start;
     58          divider = val & ~(~0 << dc->div_bit_size);
     60          if (dc->divider_table)
     61                  divider = dc->divider_table[divider];
     66  static int dove_calc_divider(const struct dove_clk *dc, unsigned long rate,
     73          if (dc->divider_table) {
     76                  for (i = 0; dc->divider_table[i]; i++)
     77                          if (divider == dc->divider_table[i]) {
    [all …]
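dove_get_divider() extracts a bitfield (div_bit_start/div_bit_size) from DIV_CTRL0 and, when a divider_table is present, treats the raw field as an index into it; dove_calc_divider() inverts that mapping. A sketch of both directions, assuming the table is zero-terminated as the `for (i = 0; dc->divider_table[i]; i++)` loop implies, and assuming fields narrower than 32 bits; names are simplified stand-ins:

#include <stdint.h>

struct dove_clk_model {
    uint32_t ctrl0;                    /* stands in for readl_relaxed(base + DIV_CTRL0) */
    unsigned div_bit_start;
    unsigned div_bit_size;
    const uint32_t *divider_table;     /* zero-terminated, or NULL for raw field */
};

static unsigned get_divider(const struct dove_clk_model *dc)
{
    uint32_t val = dc->ctrl0 >> dc->div_bit_start;
    /* Mask to div_bit_size bits, mirroring val & ~(~0 << size). */
    unsigned divider = val & ~(~0u << dc->div_bit_size);

    if (dc->divider_table)
        divider = dc->divider_table[divider];

    return divider;
}

/* Inverse direction: find the field value that encodes a desired divider. */
static int calc_divider_field(const struct dove_clk_model *dc, unsigned divider)
{
    if (dc->divider_table) {
        unsigned i;

        for (i = 0; dc->divider_table[i]; i++)
            if (divider == dc->divider_table[i])
                return (int)i;
        return -1;                     /* divider not representable */
    }
    return (int)divider;
}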
|
/drivers/gpu/drm/atmel-hlcdc/ |
D | atmel_hlcdc_dc.c |
    366  int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
    376          if (hsync_len > dc->desc->max_spw + 1 || hsync_len < 1)
    379          if (vsync_len > dc->desc->max_spw + 1 || vsync_len < 1)
    382          if (hfront_porch > dc->desc->max_hpw + 1 || hfront_porch < 1 ||
    383              hback_porch > dc->desc->max_hpw + 1 || hback_porch < 1 ||
    387          if (vfront_porch > dc->desc->max_vpw + 1 || vfront_porch < 1 ||
    388              vback_porch > dc->desc->max_vpw || vback_porch < 0 ||
    398          struct atmel_hlcdc_dc *dc = dev->dev_private;  /* in atmel_hlcdc_dc_irq_handler() */
    403          regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_IMR, &imr);
    404          regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
    [all …]
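atmel_hlcdc_dc_mode_valid() range-checks each timing component of a display mode against the controller's maxima; the recurring `+ 1` reflects hardware fields that encode a value N as N - 1. A self-contained sketch of the same checks; the limit names mirror the snippet, their values would come from the per-SoC descriptor, and unsigned fields make the snippet's `< 0` test unnecessary here:

#include <stdbool.h>

struct timing_limits { unsigned max_spw, max_hpw, max_vpw; };

struct mode_timings {
    unsigned hsync_len, vsync_len;
    unsigned hfront_porch, hback_porch;
    unsigned vfront_porch, vback_porch;
};

/* A field holding N encodes N + 1 units, so the usable range is [1, max + 1];
 * vback_porch has no + 1 in the snippet, which is mirrored here. */
static bool mode_valid(const struct timing_limits *lim,
                       const struct mode_timings *m)
{
    if (m->hsync_len > lim->max_spw + 1 || m->hsync_len < 1)
        return false;
    if (m->vsync_len > lim->max_spw + 1 || m->vsync_len < 1)
        return false;
    if (m->hfront_porch > lim->max_hpw + 1 || m->hfront_porch < 1 ||
        m->hback_porch > lim->max_hpw + 1 || m->hback_porch < 1)
        return false;
    if (m->vfront_porch > lim->max_vpw + 1 || m->vfront_porch < 1 ||
        m->vback_porch > lim->max_vpw)
        return false;
    return true;
}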
|
/drivers/gpu/ipu-v3/ |
D | ipu-dc.c |
    120  static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
    124          reg = readl(dc->base + DC_RL_CH(event));
    127          writel(reg, dc->base + DC_RL_CH(event));
    130  static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
    133          struct ipu_dc_priv *priv = dc->priv;
    171  int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
    174          struct ipu_dc_priv *priv = dc->priv;
    179          dc->di = ipu_di_get_num(di);
    192          if (dc->di)
    198          dc_link_event(dc, DC_EVT_NL, addr, 3);
    [all …]
|
/drivers/media/pci/ttpci/ |
D | av7110_hw.c |
   1061  int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
   1068          switch (dc->cmd) {
   1073                  av7110->osdbpp[av7110->osdwin] = (dc->color - 1) & 7;
   1076                          dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
   1079                  if (!dc->data) {
   1080                          ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
   1096                  ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, dc->color);
   1099                  ret = OSDSetColor(av7110, dc->color, dc->x0, dc->y0, dc->x1, dc->y1);
   1103                  ret = OSDSetPalette(av7110, dc->data, dc->color, dc->x0);
   1105                  int i, len = dc->x0 - dc->color + 1;
    [all …]
|
/drivers/infiniband/hw/qib/ |
D | qib_diag.c |
     80          struct qib_diag_client *dc;                    /* in get_client() */
     82          dc = client_pool;
     83          if (dc)
     85                  client_pool = dc->next;
     88                  dc = kmalloc(sizeof(*dc), GFP_KERNEL);
     90          if (dc) {
     91                  dc->next = NULL;
     92                  dc->dd = dd;
     93                  dc->pid = current->pid;
     94                  dc->state = OPENED;
    [all …]
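get_client() pops a node off a static singly linked free list (client_pool) and only falls back to kmalloc() when the pool is empty, so repeated open/close cycles reuse nodes. A userspace model of the pop-or-allocate pattern; locking is omitted, since the driver serializes these paths, and the state constant is a stand-in for OPENED:

#include <stdlib.h>
#include <sys/types.h>

struct diag_client {
    struct diag_client *next;
    pid_t pid;
    int state;
};

static struct diag_client *client_pool;   /* singly linked free list */

static struct diag_client *get_client(pid_t pid)
{
    struct diag_client *dc = client_pool;

    if (dc)
        client_pool = dc->next;           /* reuse a pooled node */
    else
        dc = malloc(sizeof(*dc));         /* pool empty: allocate */

    if (dc) {
        dc->next = NULL;
        dc->pid = pid;
        dc->state = 1;                    /* OPENED in the driver */
    }
    return dc;
}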
|
/drivers/misc/mic/vop/ |
D | vop_main.c |
     61          struct mic_device_ctrl __iomem *dc;            /* member */
    203          struct mic_device_ctrl __iomem *dc = vdev->dc; /* in vop_reset_inform_host() */
    207          iowrite8(0, &dc->host_ack);
    208          iowrite8(1, &dc->vdev_reset);
    213                  if (ioread8(&dc->host_ack))
    381          struct mic_device_ctrl __iomem *dc = vdev->dc; /* in vop_find_vqs() */
    398          iowrite8(1, &dc->used_address_updated);
    405                  if (!ioread8(&dc->used_address_updated))
    480          vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);      /* in _vop_add_device() */
    493          iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
    [all …]
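vop_reset_inform_host() is a mailbox handshake over shared memory: clear host_ack, set vdev_reset, then poll host_ack until the other side writes it back. A model with C11 atomics in place of ioread8/iowrite8; the bounded spin is an arbitrary stand-in for the driver's sleep-and-timeout loop:

#include <stdatomic.h>
#include <stdbool.h>

struct device_ctrl {                      /* models the shared control page */
    atomic_uchar host_ack;
    atomic_uchar vdev_reset;
};

/* Request a reset and wait (bounded spin) for the host to acknowledge. */
static bool reset_inform_host(struct device_ctrl *dc)
{
    atomic_store(&dc->host_ack, 0);       /* iowrite8(0, &dc->host_ack) */
    atomic_store(&dc->vdev_reset, 1);     /* iowrite8(1, &dc->vdev_reset) */

    for (long spins = 0; spins < 1000000; spins++) {
        if (atomic_load(&dc->host_ack))   /* ioread8(&dc->host_ack) */
            return true;
    }
    return false;                         /* host never acknowledged */
}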
|
/drivers/usb/musb/ |
D | musb_cppi41.c |
    135          struct dma_chan *dc = cppi41_channel->dc;      /* in cppi41_trans_done() */
    149          dma_desc = dmaengine_prep_slave_single(dc,
    161          dma_async_issue_pending(dc);
    223          dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,        /* in cppi41_dma_callback() */
    372          struct dma_chan *dc = cppi41_channel->dc;      /* in cppi41_configure_channel() */
    419          dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
    432          dma_async_issue_pending(dc);
    452          if (!cppi41_channel->dc)                       /* in cppi41_dma_channel_allocate() */
    577          ret = dmaengine_terminate_all(cppi41_channel->dc);     /* in cppi41_dma_channel_abort() */
    596          struct dma_chan *dc;                           /* in cppi41_release_all_dma_chans() */
    [all …]
|