/kernel/linux/linux-5.10/drivers/crypto/
omap-des.c
     47  #define _calc_walked(inout)  (dd->inout##_walk.offset - dd->inout##_sg->offset)
     49  #define DES_REG_KEY(dd, x)  ((dd)->pdata->key_ofs - \  [argument]
     52  #define DES_REG_IV(dd, x)  ((dd)->pdata->iv_ofs + ((x) * 0x04))  [argument]
     54  #define DES_REG_CTRL(dd)  ((dd)->pdata->ctrl_ofs)  [argument]
     61  #define DES_REG_DATA_N(dd, x)  ((dd)->pdata->data_ofs + ((x) * 0x04))  [argument]
     63  #define DES_REG_REV(dd)  ((dd)->pdata->rev_ofs)  [argument]
     65  #define DES_REG_MASK(dd)  ((dd)->pdata->mask_ofs)  [argument]
     69  #define DES_REG_IRQ_STATUS(dd)  ((dd)->pdata->irq_status_ofs)  [argument]
     70  #define DES_REG_IRQ_ENABLE(dd)  ((dd)->pdata->irq_enable_ofs)  [argument]
     87  struct omap_des_dev *dd;  [member]
    [all …]
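The DES_REG_* macros above resolve register addresses through per-revision platform data, so one driver can serve IP blocks whose register maps differ. A minimal sketch of that pattern, using hypothetical demo_* names rather than the driver's own:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Register layout for one IP revision, supplied as platform data. */
    struct demo_pdata {
            u32 iv_ofs;
            u32 ctrl_ofs;
            u32 data_ofs;
    };

    struct demo_dev {
            const struct demo_pdata *pdata;
            void __iomem *io_base;
    };

    /* Offsets come from pdata, so the macros work unchanged on every revision. */
    #define DEMO_REG_IV(dd, x)      ((dd)->pdata->iv_ofs + ((x) * 0x04))
    #define DEMO_REG_CTRL(dd)       ((dd)->pdata->ctrl_ofs)
    #define DEMO_REG_DATA_N(dd, x)  ((dd)->pdata->data_ofs + ((x) * 0x04))
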
omap-aes.c
     49  #define omap_aes_read(dd, offset) \  [argument]
     52  	_read_ret = __raw_readl(dd->io_base + offset); \
     58  inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)  [in omap_aes_read(), argument]
     60  	return __raw_readl(dd->io_base + offset);  [in omap_aes_read()]
     65  #define omap_aes_write(dd, offset, value) \  [argument]
     69  	__raw_writel(value, dd->io_base + offset); \
     72  inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,  [in omap_aes_write(), argument]
     75  	__raw_writel(value, dd->io_base + offset);  [in omap_aes_write()]
     79  static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,  [in omap_aes_write_mask(), argument]
     84  	val = omap_aes_read(dd, offset);  [in omap_aes_write_mask()]
    [all …]
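omap-aes.c compiles its accessors two ways — as instrumented macros under debug and as plain inlines otherwise — but both end in __raw_readl()/__raw_writel(), and the read-modify-write helper at line 79 is the reusable part. A minimal sketch of the same accessors (demo_* names are hypothetical, and readl()/writel() stand in for the raw variants):

    #include <linux/io.h>

    struct demo_dev {
            void __iomem *io_base;  /* mapped register window */
    };

    static inline u32 demo_read(struct demo_dev *dd, u32 offset)
    {
            return readl(dd->io_base + offset);
    }

    static inline void demo_write(struct demo_dev *dd, u32 offset, u32 value)
    {
            writel(value, dd->io_base + offset);
    }

    /* Update only the bits selected by @mask, preserving the rest. */
    static inline void demo_write_mask(struct demo_dev *dd, u32 offset,
                                       u32 value, u32 mask)
    {
            u32 val = demo_read(dd, offset);

            val &= ~mask;
            val |= (value & mask);
            demo_write(dd, offset, val);
    }
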
atmel-tdes.c
     75  struct atmel_tdes_dev *dd;  [member]
    181  static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)  [in atmel_tdes_read(), argument]
    183  	return readl_relaxed(dd->io_base + offset);  [in atmel_tdes_read()]
    186  static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,  [in atmel_tdes_write(), argument]
    189  	writel_relaxed(value, dd->io_base + offset);  [in atmel_tdes_write()]
    192  static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,  [in atmel_tdes_write_n(), argument]
    196  	atmel_tdes_write(dd, offset, *value);  [in atmel_tdes_write_n()]
    205  	if (!ctx->dd) {  [in atmel_tdes_find_dev()]
    210  		ctx->dd = tdes_dd;  [in atmel_tdes_find_dev()]
    212  		tdes_dd = ctx->dd;  [in atmel_tdes_find_dev()]
    [all …]
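The atmel_tdes_find_dev() matches show a transform context binding itself to a device once and reusing it on every later call. A sketch of that lookup-and-cache idiom under a list lock (all demo_* names hypothetical, and the exact locking in the driver may differ):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_dev {
            struct list_head list;
    };

    struct demo_ctx {
            struct demo_dev *dd;    /* cached device, NULL until first use */
    };

    static LIST_HEAD(demo_dev_list);
    static DEFINE_SPINLOCK(demo_list_lock);

    static struct demo_dev *demo_find_dev(struct demo_ctx *ctx)
    {
            struct demo_dev *dd;

            spin_lock_bh(&demo_list_lock);
            if (!ctx->dd) {
                    /* first use: grab any registered device and remember it */
                    dd = list_first_entry_or_null(&demo_dev_list,
                                                  struct demo_dev, list);
                    ctx->dd = dd;
            } else {
                    dd = ctx->dd;
            }
            spin_unlock_bh(&demo_list_lock);

            return dd;
    }
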
omap-aes-gcm.c
     26  static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
     29  static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)  [in omap_aes_gcm_finish_req(), argument]
     31  	struct aead_request *req = dd->aead_req;  [in omap_aes_gcm_finish_req()]
     33  	dd->in_sg = NULL;  [in omap_aes_gcm_finish_req()]
     34  	dd->out_sg = NULL;  [in omap_aes_gcm_finish_req()]
     36  	crypto_finalize_aead_request(dd->engine, req, ret);  [in omap_aes_gcm_finish_req()]
     38  	pm_runtime_mark_last_busy(dd->dev);  [in omap_aes_gcm_finish_req()]
     39  	pm_runtime_put_autosuspend(dd->dev);  [in omap_aes_gcm_finish_req()]
     42  static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)  [in omap_aes_gcm_done_task(), argument]
     48  	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);  [in omap_aes_gcm_done_task()]
    [all …]
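The finish_req matches form one complete completion path: hand the request back to the crypto engine, then drop the runtime-PM reference so the accelerator can autosuspend. Reassembled as a single function — field names follow the matches, the rest is a sketch:

    #include <crypto/aead.h>
    #include <crypto/engine.h>
    #include <linux/pm_runtime.h>
    #include <linux/scatterlist.h>

    struct demo_aes_dev {
            struct device *dev;
            struct crypto_engine *engine;
            struct aead_request *aead_req;
            struct scatterlist *in_sg;
            struct scatterlist *out_sg;
    };

    static void demo_gcm_finish_req(struct demo_aes_dev *dd, int ret)
    {
            struct aead_request *req = dd->aead_req;

            dd->in_sg = NULL;
            dd->out_sg = NULL;

            /* completes the request and lets the engine start the next one */
            crypto_finalize_aead_request(dd->engine, req, ret);

            pm_runtime_mark_last_busy(dd->dev);
            pm_runtime_put_autosuspend(dd->dev);
    }
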
atmel-aes.c
    104  struct atmel_aes_dev *dd;  [member]
    345  static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)  [in atmel_aes_read(), argument]
    347  	u32 value = readl_relaxed(dd->io_base + offset);  [in atmel_aes_read()]
    350  	if (dd->flags & AES_FLAGS_DUMP_REG) {  [in atmel_aes_read()]
    353  		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,  [in atmel_aes_read()]
    361  static inline void atmel_aes_write(struct atmel_aes_dev *dd,  [in atmel_aes_write(), argument]
    365  	if (dd->flags & AES_FLAGS_DUMP_REG) {  [in atmel_aes_write()]
    368  		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,  [in atmel_aes_write()]
    373  	writel_relaxed(value, dd->io_base + offset);  [in atmel_aes_write()]
    376  static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,  [in atmel_aes_read_n(), argument]
    [all …]
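atmel-aes.c wraps the same relaxed accessors with optional register tracing, gated by a runtime flag so non-debug paths pay nothing. A sketch of that gated-trace read (demo_* names hypothetical):

    #include <linux/bits.h>
    #include <linux/device.h>
    #include <linux/io.h>

    #define DEMO_FLAGS_DUMP_REG     BIT(0)

    struct demo_dev {
            struct device *dev;
            void __iomem *io_base;
            unsigned long flags;
    };

    static inline u32 demo_read(struct demo_dev *dd, u32 offset)
    {
            u32 value = readl_relaxed(dd->io_base + offset);

            /* dev_vdbg() only emits with VERBOSE_DEBUG; the flag gates it too */
            if (dd->flags & DEMO_FLAGS_DUMP_REG)
                    dev_vdbg(dd->dev, "read 0x%08x from 0x%02x\n", value, offset);

            return value;
    }
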
/kernel/linux/linux-6.6/drivers/crypto/
omap-des.c
     43  #define _calc_walked(inout)  (dd->inout##_walk.offset - dd->inout##_sg->offset)
     45  #define DES_REG_KEY(dd, x)  ((dd)->pdata->key_ofs - \  [argument]
     48  #define DES_REG_IV(dd, x)  ((dd)->pdata->iv_ofs + ((x) * 0x04))  [argument]
     50  #define DES_REG_CTRL(dd)  ((dd)->pdata->ctrl_ofs)  [argument]
     57  #define DES_REG_DATA_N(dd, x)  ((dd)->pdata->data_ofs + ((x) * 0x04))  [argument]
     59  #define DES_REG_REV(dd)  ((dd)->pdata->rev_ofs)  [argument]
     61  #define DES_REG_MASK(dd)  ((dd)->pdata->mask_ofs)  [argument]
     65  #define DES_REG_IRQ_STATUS(dd)  ((dd)->pdata->irq_status_ofs)  [argument]
     66  #define DES_REG_IRQ_ENABLE(dd)  ((dd)->pdata->irq_enable_ofs)  [argument]
     82  struct omap_des_dev *dd;  [member]
    [all …]
omap-aes.c
     47  #define omap_aes_read(dd, offset) \  [argument]
     50  	_read_ret = __raw_readl(dd->io_base + offset); \
     56  inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)  [in omap_aes_read(), argument]
     58  	return __raw_readl(dd->io_base + offset);  [in omap_aes_read()]
     63  #define omap_aes_write(dd, offset, value) \  [argument]
     67  	__raw_writel(value, dd->io_base + offset); \
     70  inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,  [in omap_aes_write(), argument]
     73  	__raw_writel(value, dd->io_base + offset);  [in omap_aes_write()]
     77  static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,  [in omap_aes_write_mask(), argument]
     82  	val = omap_aes_read(dd, offset);  [in omap_aes_write_mask()]
    [all …]
atmel-tdes.c
     75  struct atmel_tdes_dev *dd;  [member]
    181  static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)  [in atmel_tdes_read(), argument]
    183  	return readl_relaxed(dd->io_base + offset);  [in atmel_tdes_read()]
    186  static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,  [in atmel_tdes_write(), argument]
    189  	writel_relaxed(value, dd->io_base + offset);  [in atmel_tdes_write()]
    192  static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,  [in atmel_tdes_write_n(), argument]
    196  	atmel_tdes_write(dd, offset, *value);  [in atmel_tdes_write_n()]
    211  static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)  [in atmel_tdes_hw_init(), argument]
    215  	err = clk_prepare_enable(dd->iclk);  [in atmel_tdes_hw_init()]
    219  	if (!(dd->flags & TDES_FLAGS_INIT)) {  [in atmel_tdes_hw_init()]
    [all …]
omap-aes-gcm.c
     29  static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
     32  static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)  [in omap_aes_gcm_finish_req(), argument]
     34  	struct aead_request *req = dd->aead_req;  [in omap_aes_gcm_finish_req()]
     36  	dd->in_sg = NULL;  [in omap_aes_gcm_finish_req()]
     37  	dd->out_sg = NULL;  [in omap_aes_gcm_finish_req()]
     39  	crypto_finalize_aead_request(dd->engine, req, ret);  [in omap_aes_gcm_finish_req()]
     41  	pm_runtime_mark_last_busy(dd->dev);  [in omap_aes_gcm_finish_req()]
     42  	pm_runtime_put_autosuspend(dd->dev);  [in omap_aes_gcm_finish_req()]
     45  static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)  [in omap_aes_gcm_done_task(), argument]
     51  	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);  [in omap_aes_gcm_done_task()]
    [all …]
atmel-aes.c
    104  struct atmel_aes_dev *dd;  [member]
    347  static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)  [in atmel_aes_read(), argument]
    349  	u32 value = readl_relaxed(dd->io_base + offset);  [in atmel_aes_read()]
    352  	if (dd->flags & AES_FLAGS_DUMP_REG) {  [in atmel_aes_read()]
    355  		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,  [in atmel_aes_read()]
    363  static inline void atmel_aes_write(struct atmel_aes_dev *dd,  [in atmel_aes_write(), argument]
    367  	if (dd->flags & AES_FLAGS_DUMP_REG) {  [in atmel_aes_write()]
    370  		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,  [in atmel_aes_write()]
    375  	writel_relaxed(value, dd->io_base + offset);  [in atmel_aes_write()]
    378  static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,  [in atmel_aes_read_n(), argument]
    [all …]
/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
qib_iba6120.c
    298   * @dd: device
    306  static inline u32 qib_read_ureg32(const struct qib_devdata *dd,  [in qib_read_ureg32(), argument]
    309  	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))  [in qib_read_ureg32()]
    312  	if (dd->userbase)  [in qib_read_ureg32()]
    314  		((char __iomem *)dd->userbase +  [in qib_read_ureg32()]
    315  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    318  		(dd->uregbase +  [in qib_read_ureg32()]
    319  		 (char __iomem *)dd->kregbase +  [in qib_read_ureg32()]
    320  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    325   * @dd: device
    [all …]
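qib_read_ureg32() computes a per-context register address: contexts sit ureg_align bytes apart, and the base is either a dedicated user mapping or an offset into the kernel mapping. Reassembled from the matches above as a sketch — field names follow the matches, demo_* and DEMO_PRESENT are hypothetical stand-ins:

    #include <linux/io.h>

    struct demo_devdata {
            u64 __iomem *kregbase;  /* kernel register mapping */
            u64 __iomem *userbase;  /* separate user mapping, if any */
            u32 uregbase;           /* user regs' offset within kregbase */
            u32 ureg_align;         /* per-context stride in bytes */
            u64 flags;
    };

    #define DEMO_PRESENT    0x1     /* chip is mapped and usable */

    static inline u32 demo_read_ureg32(const struct demo_devdata *dd,
                                       int regno, int ctxt)
    {
            u64 __iomem *ubase;

            if (!dd->kregbase || !(dd->flags & DEMO_PRESENT))
                    return 0;

            if (dd->userbase)
                    ubase = (u64 __iomem *)((char __iomem *)dd->userbase +
                                            dd->ureg_align * ctxt);
            else
                    ubase = (u64 __iomem *)(dd->uregbase +
                                            (char __iomem *)dd->kregbase +
                                            dd->ureg_align * ctxt);

            return readl(&ubase[regno]);    /* low 32 bits of the 64-bit reg */
    }
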
qib_init.c
    102  void qib_set_ctxtcnt(struct qib_devdata *dd)  [in qib_set_ctxtcnt(), argument]
    105  		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();  [in qib_set_ctxtcnt()]
    106  		if (dd->cfgctxts > dd->ctxtcnt)  [in qib_set_ctxtcnt()]
    107  			dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    108  	} else if (qib_cfgctxts < dd->num_pports)  [in qib_set_ctxtcnt()]
    109  		dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    110  	else if (qib_cfgctxts <= dd->ctxtcnt)  [in qib_set_ctxtcnt()]
    111  		dd->cfgctxts = qib_cfgctxts;  [in qib_set_ctxtcnt()]
    113  		dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    114  	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :  [in qib_set_ctxtcnt()]
    [all …]
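qib_set_ctxtcnt() clamps the qib_cfgctxts module parameter against what the chip supports, defaulting to one user context per online CPU. The control flow is easier to see reassembled; a sketch with the module parameter passed in as `requested` (demo_* names hypothetical, and the num_pports special case is omitted):

    #include <linux/cpumask.h>
    #include <linux/minmax.h>

    struct demo_devdata {
            unsigned int ctxtcnt;           /* contexts the chip supports */
            unsigned int cfgctxts;          /* contexts we will configure */
            unsigned int first_user_ctxt;   /* kernel contexts come first */
            unsigned int freectxts;         /* user contexts still available */
    };

    static void demo_set_ctxtcnt(struct demo_devdata *dd, unsigned int requested)
    {
            if (!requested)         /* default: one user context per online CPU */
                    dd->cfgctxts = min(dd->first_user_ctxt + num_online_cpus(),
                                       dd->ctxtcnt);
            else if (requested <= dd->ctxtcnt)
                    dd->cfgctxts = requested;
            else                    /* asked for more than the chip has */
                    dd->cfgctxts = dd->ctxtcnt;

            dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
                            dd->cfgctxts - dd->first_user_ctxt;
    }
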
qib_iba7220.c
    222   * @dd: device
    230  static inline u32 qib_read_ureg32(const struct qib_devdata *dd,  [in qib_read_ureg32(), argument]
    233  	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))  [in qib_read_ureg32()]
    236  	if (dd->userbase)  [in qib_read_ureg32()]
    238  		((char __iomem *)dd->userbase +  [in qib_read_ureg32()]
    239  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    242  		(dd->uregbase +  [in qib_read_ureg32()]
    243  		 (char __iomem *)dd->kregbase +  [in qib_read_ureg32()]
    244  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    249   * @dd: device
    [all …]
qib_tx.c
     54   * @dd: the qlogic_ib device
     61  void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)  [in qib_disarm_piobufs(), argument]
     68  	spin_lock_irqsave(&dd->pioavail_lock, flags);  [in qib_disarm_piobufs()]
     70  		__clear_bit(i, dd->pio_need_disarm);  [in qib_disarm_piobufs()]
     71  		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));  [in qib_disarm_piobufs()]
     73  	spin_unlock_irqrestore(&dd->pioavail_lock, flags);  [in qib_disarm_piobufs()]
     82  	struct qib_devdata *dd = rcd->dd;  [in qib_disarm_piobufs_ifneeded(), local]
    103  	spin_lock_irq(&dd->pioavail_lock);  [in qib_disarm_piobufs_ifneeded()]
    105  		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {  [in qib_disarm_piobufs_ifneeded()]
    107  			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));  [in qib_disarm_piobufs_ifneeded()]
    [all …]
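qib_tx.c tracks PIO buffers that still need disarming in a bitmap and only touches the hardware for bits that are actually set, all under pioavail_lock. A sketch of that test-and-clear-under-lock idiom (demo_* names and demo_sendctrl_disarm() are hypothetical):

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    #define DEMO_NUM_BUFS   128

    struct demo_devdata {
            spinlock_t pioavail_lock;
            DECLARE_BITMAP(pio_need_disarm, DEMO_NUM_BUFS);
    };

    void demo_sendctrl_disarm(struct demo_devdata *dd, unsigned int buf);

    static void demo_disarm_piobufs(struct demo_devdata *dd,
                                    unsigned int first, unsigned int cnt)
    {
            unsigned long flags;
            unsigned int i;

            spin_lock_irqsave(&dd->pioavail_lock, flags);
            for (i = first; i < first + cnt; i++) {
                    /* clear the flag and disarm only if it was set */
                    if (__test_and_clear_bit(i, dd->pio_need_disarm))
                            demo_sendctrl_disarm(dd, i);
            }
            spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    }
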
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/
qib_iba6120.c
    298   * @dd: device
    306  static inline u32 qib_read_ureg32(const struct qib_devdata *dd,  [in qib_read_ureg32(), argument]
    309  	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))  [in qib_read_ureg32()]
    312  	if (dd->userbase)  [in qib_read_ureg32()]
    314  		((char __iomem *)dd->userbase +  [in qib_read_ureg32()]
    315  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    318  		(dd->uregbase +  [in qib_read_ureg32()]
    319  		 (char __iomem *)dd->kregbase +  [in qib_read_ureg32()]
    320  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    325   * @dd: device
    [all …]
qib_init.c
    102  void qib_set_ctxtcnt(struct qib_devdata *dd)  [in qib_set_ctxtcnt(), argument]
    105  		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();  [in qib_set_ctxtcnt()]
    106  		if (dd->cfgctxts > dd->ctxtcnt)  [in qib_set_ctxtcnt()]
    107  			dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    108  	} else if (qib_cfgctxts < dd->num_pports)  [in qib_set_ctxtcnt()]
    109  		dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    110  	else if (qib_cfgctxts <= dd->ctxtcnt)  [in qib_set_ctxtcnt()]
    111  		dd->cfgctxts = qib_cfgctxts;  [in qib_set_ctxtcnt()]
    113  		dd->cfgctxts = dd->ctxtcnt;  [in qib_set_ctxtcnt()]
    114  	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :  [in qib_set_ctxtcnt()]
    [all …]
qib_iba7220.c
    222   * @dd: device
    230  static inline u32 qib_read_ureg32(const struct qib_devdata *dd,  [in qib_read_ureg32(), argument]
    233  	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))  [in qib_read_ureg32()]
    236  	if (dd->userbase)  [in qib_read_ureg32()]
    238  		((char __iomem *)dd->userbase +  [in qib_read_ureg32()]
    239  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    242  		(dd->uregbase +  [in qib_read_ureg32()]
    243  		 (char __iomem *)dd->kregbase +  [in qib_read_ureg32()]
    244  		 dd->ureg_align * ctxt));  [in qib_read_ureg32()]
    249   * @dd: device
    [all …]
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
msix.c
     13   * @dd: valid hfi1 devdata
     16  int msix_initialize(struct hfi1_devdata *dd)  [in msix_initialize(), argument]
     30  	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;  [in msix_initialize()]
     35  	ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);  [in msix_initialize()]
     37  		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);  [in msix_initialize()]
     41  	entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),  [in msix_initialize()]
     44  		pci_free_irq_vectors(dd->pcidev);  [in msix_initialize()]
     48  	dd->msix_info.msix_entries = entries;  [in msix_initialize()]
     49  	spin_lock_init(&dd->msix_info.msix_lock);  [in msix_initialize()]
     50  	bitmap_zero(dd->msix_info.in_use_msix, total);  [in msix_initialize()]
    [all …]
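msix_initialize() sizes its request exactly — one general interrupt plus one per SDMA engine, kernel receive queue, and netdev context — and passes the same value as both min and max, so allocation fails outright rather than running with fewer vectors. A sketch of the sequence (hfi1 structures replaced by hypothetical demo_* types):

    #include <linux/bitmap.h>
    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    #define DEMO_MAX_MSIX   256

    struct demo_msix_info {
            void *msix_entries;     /* per-vector bookkeeping */
            spinlock_t msix_lock;
            DECLARE_BITMAP(in_use_msix, DEMO_MAX_MSIX);
    };

    struct demo_devdata {
            struct pci_dev *pcidev;
            u32 num_sdma, n_krcv_queues, num_netdev_contexts;
            struct demo_msix_info msix_info;
    };

    static int demo_msix_initialize(struct demo_devdata *dd)
    {
            u32 total = 1 + dd->num_sdma + dd->n_krcv_queues +
                        dd->num_netdev_contexts;
            void *entries;
            int ret;

            /* min == max == total: all-or-nothing vector allocation */
            ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
            if (ret < 0)
                    return ret;

            entries = kcalloc(total, sizeof(u64), GFP_KERNEL);
            if (!entries) {
                    pci_free_irq_vectors(dd->pcidev);
                    return -ENOMEM;
            }

            dd->msix_info.msix_entries = entries;
            spin_lock_init(&dd->msix_info.msix_lock);
            bitmap_zero(dd->msix_info.in_use_msix, total);
            return 0;
    }
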
init.c
     85  static int hfi1_create_kctxt(struct hfi1_devdata *dd,  [in hfi1_create_kctxt(), argument]
     94  	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);  [in hfi1_create_kctxt()]
     96  		dd_dev_err(dd, "Kernel receive context allocation failed\n");  [in hfi1_create_kctxt()]
    119  	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);  [in hfi1_create_kctxt()]
    121  		dd_dev_err(dd, "Kernel send context allocation failed\n");  [in hfi1_create_kctxt()]
    132  int hfi1_create_kctxts(struct hfi1_devdata *dd)  [in hfi1_create_kctxts(), argument]
    137  	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),  [in hfi1_create_kctxts()]
    138  			       GFP_KERNEL, dd->node);  [in hfi1_create_kctxts()]
    139  	if (!dd->rcd)  [in hfi1_create_kctxts()]
    142  	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {  [in hfi1_create_kctxts()]
    [all …]
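hfi1_create_kctxts() allocates the receive-context array with kcalloc_node() so the bookkeeping lands on the device's own NUMA node rather than wherever the probing CPU happens to run. The call shape in isolation, as a sketch (demo_* names hypothetical):

    #include <linux/slab.h>

    struct demo_ctxtdata;

    struct demo_devdata {
            struct demo_ctxtdata **rcd;     /* one slot per receive context */
            u16 num_rcv_contexts;
            int node;                       /* NUMA node of the device */
    };

    static int demo_alloc_rcd_array(struct demo_devdata *dd)
    {
            /* zeroed array, allocated NUMA-local to the device */
            dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
                                   GFP_KERNEL, dd->node);
            if (!dd->rcd)
                    return -ENOMEM;
            return 0;
    }
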
pcie.c
     24  int hfi1_pcie_init(struct hfi1_devdata *dd)  [in hfi1_pcie_init(), argument]
     27  	struct pci_dev *pdev = dd->pcidev;  [in hfi1_pcie_init()]
     43  		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);  [in hfi1_pcie_init()]
     49  		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);  [in hfi1_pcie_init()]
     62  		dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);  [in hfi1_pcie_init()]
     89   * Do remaining PCIe setup, once dd is allocated, and save away
     93  int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)  [in hfi1_pcie_ddinit(), argument]
    110  		dd_dev_err(dd, "chip PIO range does not match\n");  [in hfi1_pcie_ddinit()]
    114  	dd->kregbase1 = ioremap(addr, RCV_ARRAY);  [in hfi1_pcie_ddinit()]
    115  	if (!dd->kregbase1) {  [in hfi1_pcie_ddinit()]
    [all …]
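hfi1_pcie_init() runs the canonical PCI bring-up: enable the function, claim its BARs, set the DMA mask, and unwind in reverse order on any failure. A self-contained sketch of that sequence ("demo_drv" and the function name are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int demo_pcie_init(struct pci_dev *pdev)
    {
            int ret;

            ret = pci_enable_device(pdev);
            if (ret) {
                    dev_err(&pdev->dev, "pci enable failed: error %d\n", -ret);
                    return ret;
            }

            ret = pci_request_regions(pdev, "demo_drv");
            if (ret) {
                    dev_err(&pdev->dev, "pci_request_regions fails: err %d\n", -ret);
                    goto err_disable;
            }

            /* 64-bit streaming and coherent DMA in one call */
            ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (ret) {
                    dev_err(&pdev->dev, "Unable to set DMA mask: %d\n", ret);
                    goto err_release;
            }

            pci_set_master(pdev);
            return 0;

    err_release:
            pci_release_regions(pdev);
    err_disable:
            pci_disable_device(pdev);
            return ret;
    }
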
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
init.c
    126  static int hfi1_create_kctxt(struct hfi1_devdata *dd,  [in hfi1_create_kctxt(), argument]
    135  	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);  [in hfi1_create_kctxt()]
    137  		dd_dev_err(dd, "Kernel receive context allocation failed\n");  [in hfi1_create_kctxt()]
    160  	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);  [in hfi1_create_kctxt()]
    162  		dd_dev_err(dd, "Kernel send context allocation failed\n");  [in hfi1_create_kctxt()]
    173  int hfi1_create_kctxts(struct hfi1_devdata *dd)  [in hfi1_create_kctxts(), argument]
    178  	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),  [in hfi1_create_kctxts()]
    179  			       GFP_KERNEL, dd->node);  [in hfi1_create_kctxts()]
    180  	if (!dd->rcd)  [in hfi1_create_kctxts()]
    183  	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {  [in hfi1_create_kctxts()]
    [all …]
pcie.c
     67  int hfi1_pcie_init(struct hfi1_devdata *dd)  [in hfi1_pcie_init(), argument]
     70  	struct pci_dev *pdev = dd->pcidev;  [in hfi1_pcie_init()]
     86  		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);  [in hfi1_pcie_init()]
     92  		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);  [in hfi1_pcie_init()]
    105  		dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);  [in hfi1_pcie_init()]
    113  		dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);  [in hfi1_pcie_init()]
    140   * Do remaining PCIe setup, once dd is allocated, and save away
    144  int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)  [in hfi1_pcie_ddinit(), argument]
    161  		dd_dev_err(dd, "chip PIO range does not match\n");  [in hfi1_pcie_ddinit()]
    165  	dd->kregbase1 = ioremap(addr, RCV_ARRAY);  [in hfi1_pcie_ddinit()]
    [all …]
/kernel/linux/linux-6.6/drivers/md/
dm-dust.c
     78  static int dust_remove_block(struct dust_device *dd, unsigned long long block)  [in dust_remove_block(), argument]
     83  	spin_lock_irqsave(&dd->dust_lock, flags);  [in dust_remove_block()]
     84  	bblock = dust_rb_search(&dd->badblocklist, block);  [in dust_remove_block()]
     87  		if (!dd->quiet_mode) {  [in dust_remove_block()]
     91  		spin_unlock_irqrestore(&dd->dust_lock, flags);  [in dust_remove_block()]
     95  	rb_erase(&bblock->node, &dd->badblocklist);  [in dust_remove_block()]
     96  	dd->badblock_count--;  [in dust_remove_block()]
     97  	if (!dd->quiet_mode)  [in dust_remove_block()]
    100  	spin_unlock_irqrestore(&dd->dust_lock, flags);  [in dust_remove_block()]
    105  static int dust_add_block(struct dust_device *dd, unsigned long long block,  [in dust_add_block(), argument]
    [all …]
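dust_remove_block() does the whole lookup-erase-account sequence inside one spinlock critical section, so the rb-tree and badblock_count can never disagree. A sketch of that pattern — dust_rb_search() is the driver's own lookup; demo_rb_search() here is a hypothetical stand-in, as are the demo_* types:

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo_badblock {
            struct rb_node node;
            unsigned long long bb;
    };

    struct demo_device {
            struct rb_root badblocklist;
            unsigned long long badblock_count;
            spinlock_t lock;
    };

    /* hypothetical: walk the tree and return the matching node, or NULL */
    struct demo_badblock *demo_rb_search(struct rb_root *root,
                                         unsigned long long block);

    static int demo_remove_block(struct demo_device *dd, unsigned long long block)
    {
            struct demo_badblock *bblock;
            unsigned long flags;

            spin_lock_irqsave(&dd->lock, flags);
            bblock = demo_rb_search(&dd->badblocklist, block);
            if (!bblock) {
                    spin_unlock_irqrestore(&dd->lock, flags);
                    return -EINVAL;
            }

            rb_erase(&bblock->node, &dd->badblocklist);
            dd->badblock_count--;
            spin_unlock_irqrestore(&dd->lock, flags);

            kfree(bblock);  /* free outside the lock */
            return 0;
    }
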
/kernel/linux/linux-5.10/drivers/md/
dm-dust.c
     78  static int dust_remove_block(struct dust_device *dd, unsigned long long block)  [in dust_remove_block(), argument]
     83  	spin_lock_irqsave(&dd->dust_lock, flags);  [in dust_remove_block()]
     84  	bblock = dust_rb_search(&dd->badblocklist, block);  [in dust_remove_block()]
     87  		if (!dd->quiet_mode) {  [in dust_remove_block()]
     91  		spin_unlock_irqrestore(&dd->dust_lock, flags);  [in dust_remove_block()]
     95  	rb_erase(&bblock->node, &dd->badblocklist);  [in dust_remove_block()]
     96  	dd->badblock_count--;  [in dust_remove_block()]
     97  	if (!dd->quiet_mode)  [in dust_remove_block()]
    100  	spin_unlock_irqrestore(&dd->dust_lock, flags);  [in dust_remove_block()]
    105  static int dust_add_block(struct dust_device *dd, unsigned long long block,  [in dust_add_block(), argument]
    [all …]
/kernel/linux/linux-5.10/drivers/block/mtip32xx/
mtip32xx.c
    114  static int mtip_block_initialize(struct driver_data *dd);
    142  	struct driver_data *dd = pci_get_drvdata(pdev);  [in mtip_check_surprise_removal(), local]
    144  	if (dd->sr)  [in mtip_check_surprise_removal()]
    150  		dd->sr = true;  [in mtip_check_surprise_removal()]
    151  		if (dd->queue)  [in mtip_check_surprise_removal()]
    152  			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);  [in mtip_check_surprise_removal()]
    154  			dev_warn(&dd->pdev->dev,  [in mtip_check_surprise_removal()]
    155  				 "%s: dd->queue is NULL\n", __func__);  [in mtip_check_surprise_removal()]
    162  static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,  [in mtip_cmd_from_tag(), argument]
    165  	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];  [in mtip_cmd_from_tag()]
    [all …]
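mtip_check_surprise_removal() detects a yanked device by reading its PCI vendor ID — config reads of a removed function return all ones — then latches dd->sr and marks the block queue dead so in-flight and future I/O fails fast instead of timing out. A sketch of that check (struct layout hypothetical; the API calls are real):

    #include <linux/blkdev.h>
    #include <linux/pci.h>

    struct demo_driver_data {
            struct request_queue *queue;
            bool sr;        /* surprise-removed */
    };

    static bool demo_check_surprise_removal(struct pci_dev *pdev)
    {
            struct demo_driver_data *dd = pci_get_drvdata(pdev);
            u16 vendor_id;

            if (dd->sr)
                    return true;

            /* config reads of a removed device come back as all ones */
            pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
            if (vendor_id == 0xFFFF) {
                    dd->sr = true;
                    if (dd->queue)
                            blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
                    return true;
            }

            return false;
    }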