Lines Matching refs:dd (drivers/crypto/omap-sham.c)

49 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) argument
50 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) argument
51 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) argument
53 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04)) argument
63 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs) argument
65 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs) argument
71 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) argument
74 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs) argument
88 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs) argument
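
The SHA_REG_*() macros above resolve register addresses through per-revision offsets stored in dd->pdata, so one driver core can address both the OMAP2-era and OMAP4-era layouts of the SHA/MD5 module. A minimal sketch of that addressing scheme; only the *_ofs field names visible in the macros are taken from the driver, the struct name and comments are illustrative.

#include <linux/types.h>

/* Sketch of the per-revision register map referenced by the SHA_REG_*()
 * macros; the field names match the macros above, the struct name is
 * assumed for illustration. */
struct sham_regmap_sketch {
	u32 idigest_ofs;	/* inner digest words, IDIGEST(x) = ofs + 4*x */
	u32 odigest_ofs;	/* outer digest words (HMAC opad state)       */
	u32 din_ofs;		/* data-input FIFO, DIN(x) = ofs + 4*x        */
	u32 digcnt_ofs;		/* processed byte count                       */
	u32 rev_ofs;		/* IP revision register                       */
	u32 mask_ofs;		/* irq/dma enable mask                        */
	u32 sysstatus_ofs;	/* reset-done status                          */
	u32 mode_ofs;		/* algorithm/mode control (OMAP4 layout)      */
	u32 length_ofs;		/* block length to process (OMAP4 layout)     */
};
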
145 struct omap_sham_dev *dd; member
171 struct omap_sham_dev *dd; member
196 void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
198 void (*trigger)(struct omap_sham_dev *dd, size_t length);
199 int (*poll_irq)(struct omap_sham_dev *dd);
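
write_ctrl, trigger and poll_irq are per-revision hooks kept alongside those offsets in dd->pdata; the transfer paths further down call them indirectly, so the OMAP2 and OMAP4 implementations can be swapped without touching the core. A hedged sketch of that dispatch; the surrounding helper is invented for illustration.

/* Revision-specific operations, as declared around lines 196-199. */
struct sham_ops_sketch {
	void (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
			   int final, int dma);
	void (*trigger)(struct omap_sham_dev *dd, size_t length);
	int (*poll_irq)(struct omap_sham_dev *dd);
};

/* A transfer step then stays revision-agnostic (illustrative helper): */
static int sham_step_sketch(struct omap_sham_dev *dd, size_t len, int final)
{
	dd->pdata->write_ctrl(dd, len, final, 0);	/* program ctrl/mode    */
	dd->pdata->trigger(dd, len);			/* start the engine     */
	return dd->pdata->poll_irq(dd);			/* wait for input-ready */
}
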
249 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) in omap_sham_read() argument
251 return __raw_readl(dd->io_base + offset); in omap_sham_read()
254 static inline void omap_sham_write(struct omap_sham_dev *dd, in omap_sham_write() argument
257 __raw_writel(value, dd->io_base + offset); in omap_sham_write()
260 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, in omap_sham_write_mask() argument
265 val = omap_sham_read(dd, address); in omap_sham_write_mask()
268 omap_sham_write(dd, address, val); in omap_sham_write_mask()
271 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) in omap_sham_wait() argument
275 while (!(omap_sham_read(dd, offset) & bit)) { in omap_sham_wait()
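
omap_sham_read()/omap_sham_write() are thin __raw_readl/__raw_writel wrappers over dd->io_base, omap_sham_write_mask() is a read-modify-write built on them, and omap_sham_wait() busy-polls a status bit. A compact sketch of those two patterns; the timeout constant is an assumption, the driver's actual loop limit is not shown above.

#include <linux/io.h>
#include <linux/errno.h>

/* Read-modify-write of a register field, as in omap_sham_write_mask(). */
static inline void sham_rmw_sketch(void __iomem *base, u32 offset,
				   u32 value, u32 mask)
{
	u32 val = __raw_readl(base + offset);

	val &= ~mask;		/* clear the field        */
	val |= value & mask;	/* set the requested bits */
	__raw_writel(val, base + offset);
}

/* Busy-wait for a status bit, as in omap_sham_wait(); the iteration
 * limit here is illustrative, not the driver's actual timeout. */
static inline int sham_wait_sketch(void __iomem *base, u32 offset, u32 bit)
{
	unsigned long timeout = 1000000;

	while (!(__raw_readl(base + offset) & bit)) {
		if (!--timeout)
			return -ETIMEDOUT;
	}
	return 0;
}
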
286 struct omap_sham_dev *dd = ctx->dd; in omap_sham_copy_hash_omap2() local
290 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { in omap_sham_copy_hash_omap2()
292 hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i)); in omap_sham_copy_hash_omap2()
294 omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]); in omap_sham_copy_hash_omap2()
301 struct omap_sham_dev *dd = ctx->dd; in omap_sham_copy_hash_omap4() local
305 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); in omap_sham_copy_hash_omap4()
310 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { in omap_sham_copy_hash_omap4()
312 opad[i] = omap_sham_read(dd, in omap_sham_copy_hash_omap4()
313 SHA_REG_ODIGEST(dd, i)); in omap_sham_copy_hash_omap4()
315 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i), in omap_sham_copy_hash_omap4()
339 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) in omap_sham_copy_ready_hash()
367 static int omap_sham_hw_init(struct omap_sham_dev *dd) in omap_sham_hw_init() argument
371 err = pm_runtime_get_sync(dd->dev); in omap_sham_hw_init()
373 dev_err(dd->dev, "failed to get sync: %d\n", err); in omap_sham_hw_init()
377 if (!test_bit(FLAGS_INIT, &dd->flags)) { in omap_sham_hw_init()
378 set_bit(FLAGS_INIT, &dd->flags); in omap_sham_hw_init()
379 dd->err = 0; in omap_sham_hw_init()
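
omap_sham_hw_init() wakes the module through runtime PM and uses the FLAGS_INIT bit so the soft state is reset only on the first request of a session. Reassembled from the fragments above into a readable whole; error handling beyond what is listed is simplified.

#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/bitops.h>

static int sham_hw_init_sketch(struct omap_sham_dev *dd)
{
	int err;

	/* Power/clock the accelerator before touching its registers. */
	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	/* First use since the device went idle: clear soft state once. */
	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}
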
385 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length, in omap_sham_write_ctrl_omap2() argument
388 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_write_ctrl_omap2()
392 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); in omap_sham_write_ctrl_omap2()
394 omap_sham_write_mask(dd, SHA_REG_MASK(dd), in omap_sham_write_ctrl_omap2()
411 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); in omap_sham_write_ctrl_omap2()
414 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length) in omap_sham_trigger_omap2() argument
418 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) in omap_sham_poll_irq_omap2() argument
420 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); in omap_sham_poll_irq_omap2()
447 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, in omap_sham_write_n() argument
451 omap_sham_write(dd, offset, *value); in omap_sham_write_n()
454 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, in omap_sham_write_ctrl_omap4() argument
457 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_write_ctrl_omap4()
467 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); in omap_sham_write_ctrl_omap4()
478 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0), in omap_sham_write_ctrl_omap4()
480 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0), in omap_sham_write_ctrl_omap4()
497 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); in omap_sham_write_ctrl_omap4()
498 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask); in omap_sham_write_ctrl_omap4()
499 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); in omap_sham_write_ctrl_omap4()
500 omap_sham_write_mask(dd, SHA_REG_MASK(dd), in omap_sham_write_ctrl_omap4()
506 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) in omap_sham_trigger_omap4() argument
508 omap_sham_write(dd, SHA_REG_LENGTH(dd), length); in omap_sham_trigger_omap4()
511 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) in omap_sham_poll_irq_omap4() argument
513 return omap_sham_wait(dd, SHA_REG_IRQSTATUS, in omap_sham_poll_irq_omap4()
517 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, in omap_sham_xmit_cpu() argument
520 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_xmit_cpu()
526 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", in omap_sham_xmit_cpu()
529 dd->pdata->write_ctrl(dd, length, final, 0); in omap_sham_xmit_cpu()
530 dd->pdata->trigger(dd, length); in omap_sham_xmit_cpu()
537 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ in omap_sham_xmit_cpu()
539 set_bit(FLAGS_CPU, &dd->flags); in omap_sham_xmit_cpu()
550 if (dd->pdata->poll_irq(dd)) in omap_sham_xmit_cpu()
564 omap_sham_write(dd, SHA_REG_DIN(dd, count), in omap_sham_xmit_cpu()
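
omap_sham_xmit_cpu() is the PIO path: program the control registers via the pdata hooks, wait for the engine to signal input-ready, then feed the block into the DIN FIFO one 32-bit word at a time. A condensed sketch of that feed loop; the driver additionally walks the scatterlist block by block, which is elided here.

/* Sketch of the PIO feed in omap_sham_xmit_cpu(): poll for input-ready,
 * then write the buffered block word by word into the DIN FIFO. */
static int sham_xmit_cpu_sketch(struct omap_sham_dev *dd,
				const u32 *buf, size_t nwords)
{
	size_t count;
	int err;

	err = dd->pdata->poll_irq(dd);	/* engine never became ready? */
	if (err)
		return err;

	for (count = 0; count < nwords; count++)
		omap_sham_write(dd, SHA_REG_DIN(dd, count), buf[count]);

	return 0;
}
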
578 struct omap_sham_dev *dd = param; in omap_sham_dma_callback() local
580 set_bit(FLAGS_DMA_READY, &dd->flags); in omap_sham_dma_callback()
581 tasklet_schedule(&dd->done_task); in omap_sham_dma_callback()
584 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, in omap_sham_xmit_dma() argument
587 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_xmit_dma()
592 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", in omap_sham_xmit_dma()
595 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { in omap_sham_xmit_dma()
596 dev_err(dd->dev, "dma_map_sg error\n"); in omap_sham_xmit_dma()
602 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); in omap_sham_xmit_dma()
606 ret = dmaengine_slave_config(dd->dma_lch, &cfg); in omap_sham_xmit_dma()
612 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len, in omap_sham_xmit_dma()
617 dev_err(dd->dev, "prep_slave_sg failed\n"); in omap_sham_xmit_dma()
622 tx->callback_param = dd; in omap_sham_xmit_dma()
624 dd->pdata->write_ctrl(dd, length, final, 1); in omap_sham_xmit_dma()
630 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ in omap_sham_xmit_dma()
632 set_bit(FLAGS_DMA_ACTIVE, &dd->flags); in omap_sham_xmit_dma()
635 dma_async_issue_pending(dd->dma_lch); in omap_sham_xmit_dma()
637 dd->pdata->trigger(dd, length); in omap_sham_xmit_dma()
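
The DMA path maps the scatterlist, points the channel's destination at the DIN FIFO, prepares a slave-sg descriptor whose completion callback schedules the done tasklet, then issues it and triggers the engine. A hedged sketch of that dmaengine sequence; the descriptor flags and burst size are assumptions, the driver derives its burst from the block size.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int sham_xmit_dma_sketch(struct omap_sham_dev *dd,
				struct scatterlist *sg, int sg_len,
				size_t length, int final)
{
	struct dma_slave_config cfg = {
		.dst_addr       = dd->phys_base + SHA_REG_DIN(dd, 0),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 16,	/* illustrative burst size */
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	if (!dma_map_sg(dd->dev, sg, sg_len, DMA_TO_DEVICE))
		return -EINVAL;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(dd->dma_lch, sg, sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	tx->callback = omap_sham_dma_callback;	/* schedules done_task */
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);	/* dma mode */

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);
	return 0;
}
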
663 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); in omap_sham_copy_sg_lists()
692 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); in omap_sham_copy_sg_lists()
717 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); in omap_sham_copy_sgs()
724 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); in omap_sham_copy_sgs()
840 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt); in omap_sham_prepare_request()
864 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); in omap_sham_prepare_request()
874 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len); in omap_sham_prepare_request()
908 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) in omap_sham_update_dma_stop() argument
910 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); in omap_sham_update_dma_stop()
912 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); in omap_sham_update_dma_stop()
914 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags); in omap_sham_update_dma_stop()
924 struct omap_sham_dev *dd = NULL, *tmp; in omap_sham_init() local
928 if (!tctx->dd) { in omap_sham_init()
930 dd = tmp; in omap_sham_init()
933 tctx->dd = dd; in omap_sham_init()
935 dd = tctx->dd; in omap_sham_init()
939 ctx->dd = dd; in omap_sham_init()
943 dev_dbg(dd->dev, "init: digest size: %d\n", in omap_sham_init()
980 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { in omap_sham_init()
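
omap_sham_init() (and omap_sham_setkey() below) bind the tfm context to a device lazily: if tctx->dd is still NULL they take one from the driver's global device list, which probe() populates with list_add_tail(&dd->list, &sham.dev_list). A sketch of that binding; the global container's lock and the context type name are assumptions.

#include <linux/list.h>
#include <linux/spinlock.h>

static struct omap_sham_dev *sham_find_dev_sketch(struct omap_sham_ctx *tctx)
{
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);		/* lock name assumed */
	if (!tctx->dd) {
		/* take the first (typically only) registered accelerator */
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	return dd;
}
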
994 static int omap_sham_update_req(struct omap_sham_dev *dd) in omap_sham_update_req() argument
996 struct ahash_request *req = dd->req; in omap_sham_update_req()
1001 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", in omap_sham_update_req()
1009 err = omap_sham_xmit_cpu(dd, ctx->total, final); in omap_sham_update_req()
1011 err = omap_sham_xmit_dma(dd, ctx->total, final); in omap_sham_update_req()
1014 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); in omap_sham_update_req()
1019 static int omap_sham_final_req(struct omap_sham_dev *dd) in omap_sham_final_req() argument
1021 struct ahash_request *req = dd->req; in omap_sham_final_req()
1025 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) in omap_sham_final_req()
1033 err = omap_sham_xmit_dma(dd, ctx->total, 1); in omap_sham_final_req()
1035 err = omap_sham_xmit_cpu(dd, ctx->total, 1); in omap_sham_final_req()
1039 dev_dbg(dd->dev, "final_req: err: %d\n", err); in omap_sham_final_req()
1063 struct omap_sham_dev *dd = ctx->dd; in omap_sham_finish() local
1069 !test_bit(FLAGS_AUTO_XOR, &dd->flags)) in omap_sham_finish()
1073 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); in omap_sham_finish()
1081 struct omap_sham_dev *dd = ctx->dd; in omap_sham_finish_req() local
1083 if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) in omap_sham_finish_req()
1087 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) in omap_sham_finish_req()
1092 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED)); in omap_sham_finish_req()
1095 dd->pdata->copy_hash(req, 1); in omap_sham_finish_req()
1096 if (test_bit(FLAGS_FINAL, &dd->flags)) in omap_sham_finish_req()
1103 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | in omap_sham_finish_req()
1106 pm_runtime_mark_last_busy(dd->dev); in omap_sham_finish_req()
1107 pm_runtime_put_autosuspend(dd->dev); in omap_sham_finish_req()
1113 static int omap_sham_handle_queue(struct omap_sham_dev *dd, in omap_sham_handle_queue() argument
1122 spin_lock_irqsave(&dd->lock, flags); in omap_sham_handle_queue()
1124 ret = ahash_enqueue_request(&dd->queue, req); in omap_sham_handle_queue()
1125 if (test_bit(FLAGS_BUSY, &dd->flags)) { in omap_sham_handle_queue()
1126 spin_unlock_irqrestore(&dd->lock, flags); in omap_sham_handle_queue()
1129 backlog = crypto_get_backlog(&dd->queue); in omap_sham_handle_queue()
1130 async_req = crypto_dequeue_request(&dd->queue); in omap_sham_handle_queue()
1132 set_bit(FLAGS_BUSY, &dd->flags); in omap_sham_handle_queue()
1133 spin_unlock_irqrestore(&dd->lock, flags); in omap_sham_handle_queue()
1142 dd->req = req; in omap_sham_handle_queue()
1149 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", in omap_sham_handle_queue()
1152 err = omap_sham_hw_init(dd); in omap_sham_handle_queue()
1158 dd->pdata->copy_hash(req, 0); in omap_sham_handle_queue()
1161 err = omap_sham_update_req(dd); in omap_sham_handle_queue()
1164 err = omap_sham_final_req(dd); in omap_sham_handle_queue()
1166 err = omap_sham_final_req(dd); in omap_sham_handle_queue()
1169 dev_dbg(dd->dev, "exit, err: %d\n", err); in omap_sham_handle_queue()
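
omap_sham_handle_queue() is the usual crypto-queue pump: enqueue under the device lock, return early if the engine is busy, otherwise dequeue the next request (notifying any backlogged one), mark the device busy and run hw_init/copy_hash plus the update/final steps. A sketch of the queueing skeleton; the backlog completion call is the classic pattern rather than a line shown above.

#include <crypto/internal/hash.h>

static int sham_handle_queue_sketch(struct omap_sham_dev *dd,
				    struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		/* a request is in flight; it will re-pump the queue later */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);	/* classic backlog notify */

	dd->req = ahash_request_cast(async_req);
	/* ... hw_init, copy_hash and the update/final steps follow ... */
	return ret;
}
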
1190 struct omap_sham_dev *dd = tctx->dd; in omap_sham_enqueue() local
1194 return omap_sham_handle_queue(dd, req); in omap_sham_enqueue()
1200 struct omap_sham_dev *dd = ctx->dd; in omap_sham_update() local
1212 if (dd->polling_mode) in omap_sham_update()
1241 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) in omap_sham_final_shash()
1305 struct omap_sham_dev *dd = NULL, *tmp; in omap_sham_setkey() local
1309 if (!tctx->dd) { in omap_sham_setkey()
1311 dd = tmp; in omap_sham_setkey()
1314 tctx->dd = dd; in omap_sham_setkey()
1316 dd = tctx->dd; in omap_sham_setkey()
1337 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { in omap_sham_setkey()
1742 struct omap_sham_dev *dd = (struct omap_sham_dev *)data; in omap_sham_done_task() local
1745 if (!test_bit(FLAGS_BUSY, &dd->flags)) { in omap_sham_done_task()
1746 omap_sham_handle_queue(dd, NULL); in omap_sham_done_task()
1750 if (test_bit(FLAGS_CPU, &dd->flags)) { in omap_sham_done_task()
1751 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) in omap_sham_done_task()
1753 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { in omap_sham_done_task()
1754 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { in omap_sham_done_task()
1755 omap_sham_update_dma_stop(dd); in omap_sham_done_task()
1756 if (dd->err) { in omap_sham_done_task()
1757 err = dd->err; in omap_sham_done_task()
1761 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { in omap_sham_done_task()
1763 clear_bit(FLAGS_DMA_READY, &dd->flags); in omap_sham_done_task()
1771 dev_dbg(dd->dev, "update done: err: %d\n", err); in omap_sham_done_task()
1773 omap_sham_finish_req(dd->req, err); in omap_sham_done_task()
1776 if (!test_bit(FLAGS_BUSY, &dd->flags)) in omap_sham_done_task()
1777 omap_sham_handle_queue(dd, NULL); in omap_sham_done_task()
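
omap_sham_done_task() winds a finished step down: with no request in flight it just re-pumps the queue; on the CPU path it waits for OUTPUT_READY, on the DMA path it stops the transfer and checks dd->err, and once a digest (or intermediate digest) is ready it completes the request and starts the next one. Reassembled from the fragments above, with minor error-path details simplified.

static void sham_done_task_sketch(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		/* nothing in flight: just look for queued requests */
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;		/* PIO step produced a digest */
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;		/* hash or semi-hash ready */
		}
	}

	return;				/* still waiting on the engine */

finish:
	omap_sham_finish_req(dd->req, err);	/* completes the ahash request */

	if (!test_bit(FLAGS_BUSY, &dd->flags))
		omap_sham_handle_queue(dd, NULL);	/* start the next one */
}
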
1780 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) in omap_sham_irq_common() argument
1782 if (!test_bit(FLAGS_BUSY, &dd->flags)) { in omap_sham_irq_common()
1783 dev_warn(dd->dev, "Interrupt when no active requests.\n"); in omap_sham_irq_common()
1785 set_bit(FLAGS_OUTPUT_READY, &dd->flags); in omap_sham_irq_common()
1786 tasklet_schedule(&dd->done_task); in omap_sham_irq_common()
1794 struct omap_sham_dev *dd = dev_id; in omap_sham_irq_omap2() local
1796 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) in omap_sham_irq_omap2()
1798 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); in omap_sham_irq_omap2()
1800 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, in omap_sham_irq_omap2()
1802 omap_sham_read(dd, SHA_REG_CTRL); in omap_sham_irq_omap2()
1804 return omap_sham_irq_common(dd); in omap_sham_irq_omap2()
1809 struct omap_sham_dev *dd = dev_id; in omap_sham_irq_omap4() local
1811 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN); in omap_sham_irq_omap4()
1813 return omap_sham_irq_common(dd); in omap_sham_irq_omap4()
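
Both interrupt handlers acknowledge/mask in revision-specific code and then funnel into omap_sham_irq_common(), which only flags the result and defers the real work to the done tasklet. A sketch of that handoff.

#include <linux/interrupt.h>

/* Common bottom half of both IRQ handlers: flag the result and defer to
 * the tasklet; all register access stays in the revision handlers. */
static irqreturn_t sham_irq_common_sketch(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
		return IRQ_HANDLED;	/* spurious, nothing to do */
	}

	set_bit(FLAGS_OUTPUT_READY, &dd->flags);	/* digest available */
	tasklet_schedule(&dd->done_task);		/* finish in softirq */

	return IRQ_HANDLED;
}
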
1943 static int omap_sham_get_res_of(struct omap_sham_dev *dd, in omap_sham_get_res_of() argument
1964 dd->irq = irq_of_parse_and_map(node, 0); in omap_sham_get_res_of()
1965 if (!dd->irq) { in omap_sham_get_res_of()
1971 dd->pdata = match->data; in omap_sham_get_res_of()
1981 static int omap_sham_get_res_of(struct omap_sham_dev *dd, in omap_sham_get_res_of() argument
1988 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, in omap_sham_get_res_pdev() argument
2005 dd->irq = platform_get_irq(pdev, 0); in omap_sham_get_res_pdev()
2006 if (dd->irq < 0) { in omap_sham_get_res_pdev()
2008 err = dd->irq; in omap_sham_get_res_pdev()
2013 dd->pdata = &omap_sham_pdata_omap2; in omap_sham_get_res_pdev()
2021 struct omap_sham_dev *dd; in omap_sham_probe() local
2028 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL); in omap_sham_probe()
2029 if (dd == NULL) { in omap_sham_probe()
2034 dd->dev = dev; in omap_sham_probe()
2035 platform_set_drvdata(pdev, dd); in omap_sham_probe()
2037 INIT_LIST_HEAD(&dd->list); in omap_sham_probe()
2038 spin_lock_init(&dd->lock); in omap_sham_probe()
2039 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); in omap_sham_probe()
2040 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); in omap_sham_probe()
2042 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : in omap_sham_probe()
2043 omap_sham_get_res_pdev(dd, pdev, &res); in omap_sham_probe()
2047 dd->io_base = devm_ioremap_resource(dev, &res); in omap_sham_probe()
2048 if (IS_ERR(dd->io_base)) { in omap_sham_probe()
2049 err = PTR_ERR(dd->io_base); in omap_sham_probe()
2052 dd->phys_base = res.start; in omap_sham_probe()
2054 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, in omap_sham_probe()
2055 IRQF_TRIGGER_NONE, dev_name(dev), dd); in omap_sham_probe()
2058 dd->irq, err); in omap_sham_probe()
2065 dd->dma_lch = dma_request_chan(dev, "rx"); in omap_sham_probe()
2066 if (IS_ERR(dd->dma_lch)) { in omap_sham_probe()
2067 err = PTR_ERR(dd->dma_lch); in omap_sham_probe()
2071 dd->polling_mode = 1; in omap_sham_probe()
2075 dd->flags |= dd->pdata->flags; in omap_sham_probe()
2089 rev = omap_sham_read(dd, SHA_REG_REV(dd)); in omap_sham_probe()
2093 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, in omap_sham_probe()
2094 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); in omap_sham_probe()
2097 list_add_tail(&dd->list, &sham.dev_list); in omap_sham_probe()
2100 for (i = 0; i < dd->pdata->algs_info_size; i++) { in omap_sham_probe()
2101 for (j = 0; j < dd->pdata->algs_info[i].size; j++) { in omap_sham_probe()
2104 alg = &dd->pdata->algs_info[i].algs_list[j]; in omap_sham_probe()
2113 dd->pdata->algs_info[i].registered++; in omap_sham_probe()
2120 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) in omap_sham_probe()
2121 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) in omap_sham_probe()
2123 &dd->pdata->algs_info[i].algs_list[j]); in omap_sham_probe()
2126 if (!dd->polling_mode) in omap_sham_probe()
2127 dma_release_channel(dd->dma_lch); in omap_sham_probe()
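
omap_sham_probe() follows the usual devm pattern: allocate and register the device state, set up the tasklet and crypto queue, resolve resources from DT or legacy platform data, map the registers and request the IRQ, try to grab an "rx" DMA channel (falling back to polling/PIO mode), then read the revision, add the device to the global list and register the ahash algorithms, unwinding on failure. A condensed sketch of that ordering; probe-deferral handling and the registration loop are elided.

static int sham_probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_sham_dev *dd;
	struct resource res;
	int err;

	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	/* DT vs. legacy platform device: both paths fill irq and pdata. */
	err = dev->of_node ? omap_sham_get_res_of(dd, dev, &res) :
			     omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		return err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base))
		return PTR_ERR(dd->io_base);
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err)
		return err;

	/* No DMA channel is not fatal: fall back to PIO polling mode
	 * (the driver also handles -EPROBE_DEFER here, elided). */
	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch))
		dd->polling_mode = 1;

	dd->flags |= dd->pdata->flags;
	/* ... runtime PM enable, revision read, list_add_tail() onto
	 *     sham.dev_list, crypto_register_ahash() loop with unwind ... */
	return 0;
}
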
2136 struct omap_sham_dev *dd; in omap_sham_remove() local
2139 dd = platform_get_drvdata(pdev); in omap_sham_remove()
2140 if (!dd) in omap_sham_remove()
2143 list_del(&dd->list); in omap_sham_remove()
2145 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) in omap_sham_remove()
2146 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) in omap_sham_remove()
2148 &dd->pdata->algs_info[i].algs_list[j]); in omap_sham_remove()
2149 tasklet_kill(&dd->done_task); in omap_sham_remove()
2152 if (!dd->polling_mode) in omap_sham_remove()
2153 dma_release_channel(dd->dma_lch); in omap_sham_remove()