/drivers/infiniband/hw/mlx5/
D | wr.c |
      55  void **seg, int *size, void **cur_edge)   in set_eth_seg() argument
      57  struct mlx5_wqe_eth_seg *eseg = *seg;   in set_eth_seg()
      85  *seg += stride;   in set_eth_seg()
      88  handle_post_send_edge(&qp->sq, seg, *size, cur_edge);   in set_eth_seg()
      91  mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,   in set_eth_seg()
      98  *seg += sizeof(struct mlx5_wqe_eth_seg);   in set_eth_seg()
     189  static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,   in set_reg_mkey_seg() argument
     195  memset(seg, 0, sizeof(*seg));   in set_reg_mkey_seg()
     198  seg->log2_page_size = ilog2(mr->ibmr.page_size);   in set_reg_mkey_seg()
     203  seg->flags = get_umr_flags(access) | mr->access_mode;   in set_reg_mkey_seg()
     [all …]
D | wr.h |
      51  static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,   in handle_post_send_edge() argument
      56  if (likely(*seg != *cur_edge))   in handle_post_send_edge()
      62  *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);   in handle_post_send_edge()
      75  void **seg, u32 *wqe_sz,   in mlx5r_memcpy_send_wqe() argument
      79  size_t leftlen = *cur_edge - *seg;   in mlx5r_memcpy_send_wqe()
      83  memcpy(*seg, src, copysz);   in mlx5r_memcpy_send_wqe()
      88  *seg += stride;   in mlx5r_memcpy_send_wqe()
      90  handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);   in mlx5r_memcpy_send_wqe()
      95  int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
     100  void *seg, u8 size, void *cur_edge, unsigned int idx,
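Note: the two helpers above write into a work queue whose backing memory is split into equal fragments; a write that reaches the current fragment's edge continues at the start of the next one. A minimal userspace model of that pattern (the names, fragment count, and sizes are illustrative, not mlx5's):

#include <stddef.h>
#include <string.h>

/* A buffer built from equal fragments: writes that hit the current
 * fragment's edge continue in the next fragment. */
struct frag_buf {
    void *frags[4];       /* fragment base pointers */
    size_t frag_sz;       /* bytes per fragment */
    unsigned int cur;     /* index of the fragment holding the cursor */
};

static void advance_edge(struct frag_buf *fb, void **seg, void **cur_edge)
{
    if (*seg != *cur_edge)
        return;                          /* still inside this fragment */
    fb->cur = (fb->cur + 1) % 4;         /* hop to the next fragment */
    *seg = fb->frags[fb->cur];
    *cur_edge = (char *)*seg + fb->frag_sz;
}

static void frag_memcpy(struct frag_buf *fb, void **cur_edge, void **seg,
                        const void *src, size_t n)
{
    while (n) {
        size_t left = (size_t)((char *)*cur_edge - (char *)*seg);
        size_t chunk = n < left ? n : left;

        memcpy(*seg, src, chunk);        /* copy what fits before the edge */
        src = (const char *)src + chunk;
        *seg = (char *)*seg + chunk;
        n -= chunk;
        advance_edge(fb, seg, cur_edge);
    }
}

int main(void)
{
    char f0[8], f1[8], f2[8], f3[8];
    struct frag_buf fb = { { f0, f1, f2, f3 }, sizeof(f0), 0 };
    void *seg = f0, *edge = f0 + sizeof(f0);

    /* 23 bytes span three 8-byte fragments: 8 + 8 + 7 */
    frag_memcpy(&fb, &edge, &seg, "hello, fragmented world", 23);
    return 0;
}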
/drivers/net/ethernet/mellanox/mlx5/core/steering/
D | dr_buddy.c |
      79  unsigned int seg, order_iter, m;   in dr_buddy_find_free_seg() local
      87  seg = find_first_bit(buddy->bitmap[order_iter], m);   in dr_buddy_find_free_seg()
      89  if (WARN(seg >= m,   in dr_buddy_find_free_seg()
     100  *segment = seg;   in dr_buddy_find_free_seg()
     125  unsigned int seg, order_iter;   in mlx5dr_buddy_alloc_mem() local
     128  err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);   in mlx5dr_buddy_alloc_mem()
     132  bitmap_clear(buddy->bitmap[order_iter], seg, 1);   in mlx5dr_buddy_alloc_mem()
     141  seg <<= 1;   in mlx5dr_buddy_alloc_mem()
     142  bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);   in mlx5dr_buddy_alloc_mem()
     146  seg <<= order;   in mlx5dr_buddy_alloc_mem()
     [all …]
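Note: this is a textbook buddy allocator over per-order bitmaps: scan upward from the requested order for a free block, then split downward, marking each split-off buddy (seg ^ 1) free. A runnable, self-contained model (the 16-slot pool size and single-word bitmaps are assumptions):

#include <stdio.h>

#define MAX_ORDER 4

/* Bit i set in free_mask[o] means block i is free at order o. */
static unsigned int free_mask[MAX_ORDER + 1];

/* Allocate a block of 2^order slots; return the first slot index or -1. */
static int buddy_alloc(unsigned int order)
{
    unsigned int o, seg;

    for (o = order; o <= MAX_ORDER; o++)       /* scan upward for a free block */
        for (seg = 0; seg < (1u << (MAX_ORDER - o)); seg++)
            if (free_mask[o] & (1u << seg))
                goto found;
    return -1;
found:
    free_mask[o] &= ~(1u << seg);              /* claim the block */
    while (o > order) {                        /* split down to the wanted order */
        o--;
        seg <<= 1;
        free_mask[o] |= 1u << (seg ^ 1);       /* the split-off buddy stays free */
    }
    return (int)(seg << order);                /* convert block index to slots */
}

int main(void)
{
    free_mask[MAX_ORDER] = 1;                  /* start: one free 16-slot block */
    printf("%d %d\n", buddy_alloc(1), buddy_alloc(1)); /* prints "0 2" */
    return 0;
}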
D | dr_icm_pool.c |
      64  return (u64)offset * chunk->seg;   in mlx5dr_icm_pool_get_chunk_mr_addr()
      76  return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;   in mlx5dr_icm_pool_get_chunk_icm_addr()
     321  unsigned int seg)   in dr_icm_chunk_create() argument
     330  offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;   in dr_icm_chunk_create()
     332  chunk->seg = seg;   in dr_icm_chunk_create()
     377  mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));   in dr_icm_pool_sync_all_buddy_pools()
     392  unsigned int *seg)   in dr_icm_handle_buddies_get_mem() argument
     402  chunk_size, seg);   in dr_icm_handle_buddies_get_mem()
     443  unsigned int seg;   in mlx5dr_icm_alloc_chunk() local
     451  ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);   in mlx5dr_icm_alloc_chunk()
     [all …]
/drivers/firmware/google/
D | memconsole-coreboot.c |
      45  struct seg { /* describes ring buffer segments in logical order */   in memconsole_coreboot_read() struct
      48  } seg[2] = { {0}, {0} };   in memconsole_coreboot_read() local
      55  seg[0] = (struct seg){.phys = cursor, .len = size - cursor};   in memconsole_coreboot_read()
      56  seg[1] = (struct seg){.phys = 0, .len = cursor};   in memconsole_coreboot_read()
      58  seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};   in memconsole_coreboot_read()
      61  for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {   in memconsole_coreboot_read()
      63  cbmem_console->body + seg[i].phys, seg[i].len);   in memconsole_coreboot_read()
      64  pos -= seg[i].len;   in memconsole_coreboot_read()
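Note: the driver flattens the ring buffer into at most two segments in oldest-first order: after an overflow, [cursor, size) followed by [0, cursor); otherwise just [0, cursor). A userspace sketch of that read (the `overflowed` flag stands in for the bits the real driver reads from the console header):

#include <stddef.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct seg { size_t phys; size_t len; };

static size_t ring_read(const char *body, size_t size, size_t cursor,
                        int overflowed, char *dst, size_t count)
{
    struct seg seg[2] = { { 0, 0 }, { 0, 0 } };
    size_t done = 0;
    int i;

    if (overflowed) {
        seg[0] = (struct seg){ .phys = cursor, .len = size - cursor };
        seg[1] = (struct seg){ .phys = 0, .len = cursor };
    } else {
        seg[0] = (struct seg){ .phys = 0, .len = MIN(cursor, size) };
    }

    for (i = 0; i < 2 && done < count; i++) {
        size_t n = MIN(seg[i].len, count - done);

        memcpy(dst + done, body + seg[i].phys, n);  /* oldest bytes first */
        done += n;
    }
    return done;
}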
/drivers/acpi/
D | pci_mcfg.c |
      46  #define AL_ECAM(table_id, rev, seg, ops) \   argument
      47  { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
      58  #define QCOM_ECAM32(seg) \   argument
      59  { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
      70  #define HISI_QUAD_DOM(table_id, seg, ops) \   argument
      71  { "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
      72  { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
      73  { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
      74  { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
     100  #define THUNDER_ECAM_QUIRK(rev, seg) \   argument
     [all …]
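Note: these macros exist to stamp out several quirk-table rows at once; HISI_QUAD_DOM, for example, expands into entries for four consecutive PCI segments. A stripped-down illustration of the pattern (the struct is a simplified stand-in, not the kernel's mcfg_fixup, and the table IDs are made up):

struct quirk {
    const char *oem_id;
    const char *table_id;
    int segment;
};

#define QUAD_DOM(table_id, seg)            \
    { "HISI  ", table_id, (seg) + 0 },     \
    { "HISI  ", table_id, (seg) + 1 },     \
    { "HISI  ", table_id, (seg) + 2 },     \
    { "HISI  ", table_id, (seg) + 3 }

static const struct quirk quirks[] = {
    QUAD_DOM("TBL0    ", 0),    /* one line of source, segments 0..3 */
    QUAD_DOM("TBL1    ", 4),    /* segments 4..7 */
};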
/drivers/usb/cdns3/
D | cdnsp-mem.c |
      34  struct cdnsp_segment *seg;   in cdnsp_segment_alloc() local
      38  seg = kzalloc(sizeof(*seg), flags);   in cdnsp_segment_alloc()
      39  if (!seg)   in cdnsp_segment_alloc()
      42  seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);   in cdnsp_segment_alloc()
      43  if (!seg->trbs) {   in cdnsp_segment_alloc()
      44  kfree(seg);   in cdnsp_segment_alloc()
      49  seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);   in cdnsp_segment_alloc()
      50  if (!seg->bounce_buf)   in cdnsp_segment_alloc()
      57  seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);   in cdnsp_segment_alloc()
      59  seg->dma = dma;   in cdnsp_segment_alloc()
     [all …]
D | cdnsp-ring.c |
      71  dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,   in cdnsp_trb_virt_to_dma() argument
      74  unsigned long segment_offset = trb - seg->trbs;   in cdnsp_trb_virt_to_dma()
      76  if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)   in cdnsp_trb_virt_to_dma()
      79  return seg->dma + (segment_offset * sizeof(*trb));   in cdnsp_trb_virt_to_dma()
      92  bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)   in cdnsp_last_trb_on_seg() argument
      94  return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];   in cdnsp_last_trb_on_seg()
      98  struct cdnsp_segment *seg,   in cdnsp_last_trb_on_ring() argument
     101  return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);   in cdnsp_last_trb_on_ring()
     131  struct cdnsp_segment **seg,   in cdnsp_next_trb() argument
     135  *seg = (*seg)->next;   in cdnsp_next_trb()
     [all …]
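Note: because a segment owns a contiguous TRB array plus the bus address of its base, converting a TRB pointer to a DMA address is pure index arithmetic, with a bounds check for pointers outside the segment. A compact model (the TRB layout and segment size are assumptions):

#include <stdint.h>
#include <stddef.h>

#define TRBS_PER_SEGMENT 256

union trb { uint32_t field[4]; };

struct segment {
    union trb *trbs;     /* CPU view of the TRB array */
    uint64_t dma;        /* bus address of trbs[0] */
};

static uint64_t trb_virt_to_dma(const struct segment *seg, const union trb *trb)
{
    size_t off = (size_t)(trb - seg->trbs);

    if (trb < seg->trbs || off >= TRBS_PER_SEGMENT)
        return 0;                          /* pointer not in this segment */
    return seg->dma + off * sizeof(*trb);
}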
/drivers/net/ethernet/intel/ice/
D | ice_ethtool_fdir.c |
     621  ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,   in ice_fdir_set_hw_fltr_rule() argument
     656  if (!memcmp(old_seg, seg, sizeof(*seg)))   in ice_fdir_set_hw_fltr_rule()
     682  err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,   in ice_fdir_set_hw_fltr_rule()
     688  seg, &entry1_h);   in ice_fdir_set_hw_fltr_rule()
     693  seg, &entry2_h);   in ice_fdir_set_hw_fltr_rule()
     697  hw_prof->fdir_seg[tun] = seg;   in ice_fdir_set_hw_fltr_rule()
     716  ICE_FLOW_PRIO_NORMAL, seg,   in ice_fdir_set_hw_fltr_rule()
     781  ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,   in ice_set_init_fdir_seg() argument
     787  if (!seg)   in ice_set_init_fdir_seg()
     810  ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);   in ice_set_init_fdir_seg()
     [all …]
D | ice_flow.c |
     662  static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)   in ice_flow_calc_seg_sz() argument
     667  sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?   in ice_flow_calc_seg_sz()
     671  if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)   in ice_flow_calc_seg_sz()
     673  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)   in ice_flow_calc_seg_sz()
     675  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)   in ice_flow_calc_seg_sz()
     677  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)   in ice_flow_calc_seg_sz()
     682  if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)   in ice_flow_calc_seg_sz()
     684  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)   in ice_flow_calc_seg_sz()
     686  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)   in ice_flow_calc_seg_sz()
     688  else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)   in ice_flow_calc_seg_sz()
     [all …]
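Note: ice_flow_calc_seg_sz() sizes a dummy packet by walking the segment's header-flag word layer by layer and summing the selected header lengths, with the choices at each layer mutually exclusive. A simplified model (flag values and header sizes are illustrative, not the driver's):

#include <stdint.h>

#define HDR_VLAN (1u << 0)
#define HDR_IPV4 (1u << 1)
#define HDR_IPV6 (1u << 2)
#define HDR_TCP  (1u << 3)
#define HDR_UDP  (1u << 4)

static uint16_t seg_dummy_pkt_size(uint32_t hdrs)
{
    uint16_t sz = 14;                  /* Ethernet header */

    if (hdrs & HDR_VLAN)
        sz += 4;
    if (hdrs & HDR_IPV4)               /* L3: pick exactly one */
        sz += 20;
    else if (hdrs & HDR_IPV6)
        sz += 40;
    if (hdrs & HDR_TCP)                /* L4: likewise */
        sz += 20;
    else if (hdrs & HDR_UDP)
        sz += 8;
    return sz;
}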
D | ice_virtchnl_fdir.c |
     292  struct ice_flow_seg_info *seg)   in ice_vc_fdir_set_flow_fld() argument
     317  ice_flow_set_fld(seg, fld[i],   in ice_vc_fdir_set_flow_fld()
     336  struct ice_flow_seg_info *seg)   in ice_vc_fdir_set_flow_hdr() argument
     344  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);   in ice_vc_fdir_set_flow_hdr()
     347  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |   in ice_vc_fdir_set_flow_hdr()
     352  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |   in ice_vc_fdir_set_flow_hdr()
     357  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |   in ice_vc_fdir_set_flow_hdr()
     362  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |   in ice_vc_fdir_set_flow_hdr()
     367  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |   in ice_vc_fdir_set_flow_hdr()
     372  ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |   in ice_vc_fdir_set_flow_hdr()
     [all …]
/drivers/auxdisplay/
D | ht16k33.c |
     105  struct ht16k33_seg seg;   member
     146  memcpy(buf, &priv->seg.map, priv->seg.map_size);   in map_seg_show()
     147  return priv->seg.map_size;   in map_seg_show()
     155  if (cnt != priv->seg.map_size)   in map_seg_store()
     158  memcpy(&priv->seg.map, buf, cnt);   in map_seg_store()
     447  seg.linedisp);   in ht16k33_linedisp_update()
     456  struct ht16k33_seg *seg = &priv->seg;   in ht16k33_seg7_update() local
     457  char *s = seg->curr;   in ht16k33_seg7_update()
     460  buf[0] = map_to_seg7(&seg->map.seg7, *s++);   in ht16k33_seg7_update()
     462  buf[2] = map_to_seg7(&seg->map.seg7, *s++);   in ht16k33_seg7_update()
     [all …]
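Note: map_to_seg7() comes from the uapi header include/uapi/linux/map_to_7segment.h, so the same character-to-segment lookup the driver performs can be exercised from userspace. A sketch, assuming that uapi header is available on the build host:

#include <stdio.h>
#include <linux/map_to_7segment.h>

static SEG7_DEFAULT_MAP(map_seg7);      /* default ASCII -> segment-bit map */

int main(void)
{
    const char *s = "42";

    while (*s)                          /* one segment bitmask per character */
        printf("0x%02x\n", map_to_seg7(&map_seg7, *s++));
    return 0;
}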
/drivers/net/ethernet/marvell/octeontx2/nic/
D | otx2_txrx.c |
      84  struct sk_buff *skb, int seg, int *len)   in otx2_dma_map_skb_frag() argument
      91  if (!seg) {   in otx2_dma_map_skb_frag()
      96  frag = &skb_shinfo(skb)->frags[seg - 1];   in otx2_dma_map_skb_frag()
     106  int seg;   in otx2_dma_unmap_skb_frags() local
     108  for (seg = 0; seg < sg->num_segs; seg++) {   in otx2_dma_unmap_skb_frags()
     109  otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],   in otx2_dma_unmap_skb_frags()
     110  sg->size[seg], DMA_TO_DEVICE);   in otx2_dma_unmap_skb_frags()
     263  int seg;   in otx2_free_rcv_seg() local
     270  for (seg = 0; seg < sg->segs; seg++, seg_addr++)   in otx2_free_rcv_seg()
     352  int seg;   in otx2_rcv_pkt_handler() local
     [all …]
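Note: the otx2 convention is that segment 0 is the skb's linear area and segment N (N >= 1) maps page fragment N - 1, which is why the code above indexes frags[seg - 1]. A simplified model with stand-in structs (not the kernel's sk_buff):

#include <stddef.h>

struct fake_frag { void *addr; size_t len; };

struct fake_skb {
    void *data;                  /* linear data */
    size_t headlen;
    struct fake_frag frags[8];
    unsigned int nr_frags;
};

static void *seg_addr(const struct fake_skb *skb, unsigned int seg, size_t *len)
{
    if (!seg) {                          /* segment 0: the linear area */
        *len = skb->headlen;
        return skb->data;
    }
    *len = skb->frags[seg - 1].len;      /* segment N: fragment N - 1 */
    return skb->frags[seg - 1].addr;
}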
/drivers/isdn/mISDN/
D | dsp_audio.c |
      48  int seg;   in linear2alaw() local
      65  for (seg = 0; seg < 8; seg++) {   in linear2alaw()
      66  if (pcm_val <= seg_end[seg])   in linear2alaw()
      70  return ((seg << 4) |   in linear2alaw()
      71  ((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask;   in linear2alaw()
      78  int seg;   in alaw2linear() local
      82  seg = (((int) alaw & 0x70) >> 4);   in alaw2linear()
      83  if (seg)   in alaw2linear()
      84  i = (i + 0x100) << (seg - 1);   in alaw2linear()
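Note: these are the classic G.711 A-law companding routines; the loop locates the segment (chord) containing the sample's magnitude, the low nibble quantizes within it, and XOR with 0x55 toggles even bits per the standard. A self-contained userspace version of the same algorithm, round-tripping one sample:

#include <stdio.h>

static const int seg_end[8] = {
    0xff, 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff
};

static unsigned char linear2alaw(short pcm_val)
{
    int mask = 0xd5;                   /* sign bit set for positive input */
    int val = pcm_val;
    int seg;

    if (val < 0) {
        mask = 0x55;
        val = -val - 1;
    }
    for (seg = 0; seg < 8; seg++)      /* locate the segment */
        if (val <= seg_end[seg])
            break;
    return ((seg << 4) |
            ((val >> (seg ? seg + 3 : 4)) & 0x0f)) ^ mask;
}

static short alaw2linear(unsigned char alaw)
{
    int i, seg;

    alaw ^= 0x55;
    i = ((alaw & 0x0f) << 4) + 8;      /* +8 centers the quantization step */
    seg = (alaw & 0x70) >> 4;
    if (seg)
        i = (i + 0x100) << (seg - 1);
    return (short)((alaw & 0x80) ? i : -i);
}

int main(void)
{
    short s = 1234;
    unsigned char a = linear2alaw(s);

    printf("%d -> 0x%02x -> %d\n", s, a, alaw2linear(a)); /* 1234 -> 0xe6 -> 1248 */
    return 0;
}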
/drivers/infiniband/hw/mthca/
D | mthca_mr.c |
      88  u32 seg;   in mthca_buddy_alloc() local
      95  seg = find_first_bit(buddy->bits[o], m);   in mthca_buddy_alloc()
      96  if (seg < m)   in mthca_buddy_alloc()
     104  __clear_bit(seg, buddy->bits[o]);   in mthca_buddy_alloc()
     109  seg <<= 1;   in mthca_buddy_alloc()
     110  __set_bit(seg ^ 1, buddy->bits[o]);   in mthca_buddy_alloc()
     116  seg <<= order;   in mthca_buddy_alloc()
     118  return seg;   in mthca_buddy_alloc()
     121  static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)   in mthca_buddy_free() argument
     123  seg >>= order;   in mthca_buddy_free()
     [all …]
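Note: mthca's allocator is the same scan-then-split scheme as dr_buddy.c above, and its free path is the mirror image: while the buddy block (seg ^ 1) is also free, absorb it and climb one order, then mark the merged block free. A sketch reusing the free_mask/MAX_ORDER layout from the earlier dr_buddy.c model:

static void buddy_free(unsigned int slot, unsigned int order)
{
    unsigned int seg = slot >> order;              /* slots -> block index */

    while (order < MAX_ORDER &&
           (free_mask[order] & (1u << (seg ^ 1)))) {
        free_mask[order] &= ~(1u << (seg ^ 1));    /* absorb the free buddy */
        seg >>= 1;                                 /* parent block index */
        order++;
    }
    free_mask[order] |= 1u << seg;                 /* park the merged block */
}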
/drivers/char/agp/
D | compat_ioctl.c |
      96  int seg;   in compat_agpioc_reserve_wrap() local
     122  for (seg = 0; seg < ureserve.seg_count; seg++) {   in compat_agpioc_reserve_wrap()
     123  ksegment[seg].pg_start = usegment[seg].pg_start;   in compat_agpioc_reserve_wrap()
     124  ksegment[seg].pg_count = usegment[seg].pg_count;   in compat_agpioc_reserve_wrap()
     125  ksegment[seg].prot = usegment[seg].prot;   in compat_agpioc_reserve_wrap()
D | frontend.c |
     105  struct agp_segment_priv *seg;   in agp_find_seg_in_client() local
     112  seg = *(client->segments);   in agp_find_seg_in_client()
     115  if ((seg[i].pg_start == pg_start) &&   in agp_find_seg_in_client()
     116  (seg[i].pg_count == pg_count) &&   in agp_find_seg_in_client()
     117  (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {   in agp_find_seg_in_client()
     118  return seg + i;   in agp_find_seg_in_client()
     141  struct agp_segment_priv ** seg, int num_segments)   in agp_add_seg_to_client() argument
     150  DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);   in agp_add_seg_to_client()
     152  client->segments = seg;   in agp_add_seg_to_client()
     166  struct agp_segment_priv *seg;   in agp_create_segment() local
     [all …]
/drivers/peci/
D | internal.h |
      47  struct peci_request *peci_xfer_ep_pci_cfg_local_readb(struct peci_device *device, u8 seg,
      49  struct peci_request *peci_xfer_ep_pci_cfg_local_readw(struct peci_device *device, u8 seg,
      51  struct peci_request *peci_xfer_ep_pci_cfg_local_readl(struct peci_device *device, u8 seg,
      54  struct peci_request *peci_xfer_ep_pci_cfg_readb(struct peci_device *device, u8 seg,
      56  struct peci_request *peci_xfer_ep_pci_cfg_readw(struct peci_device *device, u8 seg,
      58  struct peci_request *peci_xfer_ep_pci_cfg_readl(struct peci_device *device, u8 seg,
      61  struct peci_request *peci_xfer_ep_mmio32_readl(struct peci_device *device, u8 bar, u8 seg,
      64  struct peci_request *peci_xfer_ep_mmio64_readl(struct peci_device *device, u8 bar, u8 seg,
/drivers/media/platform/verisilicon/
D | hantro_g1_vp8_dec.c |
     137  const struct v4l2_vp8_segment *seg = &hdr->segment;   in cfg_lf() local
     143  if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {   in cfg_lf()
     145  } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {   in cfg_lf()
     147  u32 lf_level = clamp(lf->level + seg->lf_update[i],   in cfg_lf()
     155  seg->lf_update[i]);   in cfg_lf()
     180  const struct v4l2_vp8_segment *seg = &hdr->segment;   in cfg_qp() local
     184  if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {   in cfg_qp()
     186  } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {   in cfg_qp()
     188  u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],   in cfg_qp()
     196  seg->quant_update[i]);   in cfg_qp()
     [all …]
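Note: this decoder and the Rockchip one below pick per-segment loop-filter (and quantizer) values the same way: segmentation off means one global value, delta mode adds a clamped per-segment offset to the base, and absolute mode replaces the value outright. A hedged model of the selection (the flag values are stand-ins for V4L2_VP8_SEGMENT_FLAG_*):

#include <stdint.h>

#define SEG_ENABLED    (1u << 0)
#define SEG_DELTA_MODE (1u << 1)

static int clamp_int(int v, int lo, int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static uint32_t seg_lf_level(uint32_t flags, int base_level,
                             const int8_t lf_update[4], int seg_id)
{
    if (!(flags & SEG_ENABLED))
        return (uint32_t)base_level;           /* one level for every block */
    if (flags & SEG_DELTA_MODE)
        return (uint32_t)clamp_int(base_level + lf_update[seg_id], 0, 63);
    return (uint32_t)lf_update[seg_id];        /* absolute per-segment level */
}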
D | rockchip_vpu2_hw_vp8_dec.c |
     278  const struct v4l2_vp8_segment *seg = &hdr->segment;   in cfg_lf() local
     284  if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {   in cfg_lf()
     286  } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {   in cfg_lf()
     288  u32 lf_level = clamp(lf->level + seg->lf_update[i],   in cfg_lf()
     296  seg->lf_update[i]);   in cfg_lf()
     318  const struct v4l2_vp8_segment *seg = &hdr->segment;   in cfg_qp() local
     322  if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {   in cfg_qp()
     324  } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {   in cfg_qp()
     326  u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],   in cfg_qp()
     334  seg->quant_update[i]);   in cfg_qp()
     [all …]
/drivers/usb/host/
D | xhci-mem.c |
      33  struct xhci_segment *seg;   in xhci_segment_alloc() local
      38  seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));   in xhci_segment_alloc()
      39  if (!seg)   in xhci_segment_alloc()
      42  seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);   in xhci_segment_alloc()
      43  if (!seg->trbs) {   in xhci_segment_alloc()
      44  kfree(seg);   in xhci_segment_alloc()
      49  seg->bounce_buf = kzalloc_node(max_packet, flags,   in xhci_segment_alloc()
      51  if (!seg->bounce_buf) {   in xhci_segment_alloc()
      52  dma_pool_free(xhci->segment_pool, seg->trbs, dma);   in xhci_segment_alloc()
      53  kfree(seg);   in xhci_segment_alloc()
     [all …]
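Note: xhci_segment_alloc(), like cdnsp_segment_alloc() earlier, follows the standard kernel unwind pattern: acquire each part in order, and on any failure release exactly what was already acquired, in reverse. A plain-malloc model of that shape (the real code draws the TRB array from a DMA pool):

#include <stdlib.h>

struct segment {
    void *trbs;           /* a DMA-pool allocation in the driver */
    void *bounce_buf;     /* only needed when max_packet != 0 */
};

static struct segment *segment_alloc(size_t trb_bytes, size_t max_packet)
{
    struct segment *seg = calloc(1, sizeof(*seg));

    if (!seg)
        return NULL;
    seg->trbs = calloc(1, trb_bytes);
    if (!seg->trbs)
        goto free_seg;
    if (max_packet) {
        seg->bounce_buf = calloc(1, max_packet);
        if (!seg->bounce_buf)
            goto free_trbs;
    }
    return seg;

free_trbs:
    free(seg->trbs);      /* unwind in reverse order of acquisition */
free_seg:
    free(seg);
    return NULL;
}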
/drivers/thermal/
D | k3_j72xx_bandgap.c |
      82  static void create_table_segments(struct err_values *err_vals, int seg,   in create_table_segments() argument
      87  if (seg == 0)   in create_table_segments()
      90  idx1 = err_vals->refs[seg];   in create_table_segments()
      92  idx2 = err_vals->refs[seg + 1];   in create_table_segments()
      93  err1 = err_vals->errs[seg];   in create_table_segments()
      94  err2 = err_vals->errs[seg + 1];   in create_table_segments()
      95  ref1 = err_vals->refs[seg];   in create_table_segments()
      96  ref2 = err_vals->refs[seg + 1];   in create_table_segments()
     130  int inc, i, seg;   in prep_lookup_table() local
     138  for (seg = 0; seg < 3; seg++)   in prep_lookup_table()
     [all …]
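Note: create_table_segments() fills the stretch of the error lookup table between two calibration points by linear interpolation, one segment per pair of adjacent reference values. A minimal integer-math sketch (assumes ref2 > ref1; the driver's exact rounding may differ):

static void fill_err_segment(int *table, int ref1, int err1, int ref2, int err2)
{
    int idx;

    /* table[ref1] = err1, table[ref2] = err2, straight line in between */
    for (idx = ref1; idx <= ref2; idx++)
        table[idx] = err1 + (err2 - err1) * (idx - ref1) / (ref2 - ref1);
}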
/drivers/staging/media/rkvdec/
D | rkvdec-vp9.c |
     153  struct v4l2_vp9_segmentation seg;   member
     318  const struct v4l2_vp9_segmentation *seg;   in init_probs() local
     324  seg = &dec_params->seg;   in init_probs()
     337  memcpy(rkprobs->pred, seg->pred_probs, sizeof(rkprobs->pred));   in init_probs()
     338  memcpy(rkprobs->tree, seg->tree_probs, sizeof(rkprobs->tree));   in init_probs()
     452  const struct v4l2_vp9_segmentation *seg;   in config_seg_registers() local
     458  seg = vp9_ctx->last.valid ? &vp9_ctx->last.seg : &vp9_ctx->cur.seg;   in config_seg_registers()
     460  if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {   in config_seg_registers()
     461  feature_val = seg->feature_data[segid][feature_id];   in config_seg_registers()
     467  if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {   in config_seg_registers()
     [all …]
/drivers/xen/
D | gntdev.c |
     859  struct gntdev_grant_copy_segment *seg,   in gntdev_grant_copy_seg() argument
     869  if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))   in gntdev_grant_copy_seg()
     873  if (seg->flags & GNTCOPY_source_gref) {   in gntdev_grant_copy_seg()
     874  if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)   in gntdev_grant_copy_seg()
     877  if (seg->flags & GNTCOPY_dest_gref) {   in gntdev_grant_copy_seg()
     878  if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)   in gntdev_grant_copy_seg()
     885  while (copied < seg->len) {   in gntdev_grant_copy_seg()
     898  len = seg->len - copied;   in gntdev_grant_copy_seg()
     903  if (seg->flags & GNTCOPY_source_gref) {   in gntdev_grant_copy_seg()
     904  op->source.u.ref = seg->source.foreign.ref;   in gntdev_grant_copy_seg()
     [all …]
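Note: after validating that the foreign offsets plus length fit in a page, the copy loop above processes a segment in chunks until `copied` reaches seg->len, capping each chunk at what the current batch can take. A sketch of that loop shape (do_one_copy() is a hypothetical stand-in for building and issuing one gnttab copy op):

#include <stddef.h>

struct copy_seg { size_t len; };

static size_t do_one_copy(const struct copy_seg *seg, size_t off, size_t max)
{
    (void)seg;
    (void)off;
    return max;                           /* pretend the whole chunk went out */
}

static void copy_segment(const struct copy_seg *seg, size_t batch_room)
{
    size_t copied = 0;

    while (copied < seg->len) {
        size_t len = seg->len - copied;   /* bytes still to move */

        if (len > batch_room)
            len = batch_room;             /* cap to the batch capacity */
        copied += do_one_copy(seg, copied, len);
    }
}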
/drivers/gpu/drm/amd/amdgpu/
D | soc15.h |
      55  uint32_t seg;   member
      62  uint32_t seg;   member
      72  uint32_t seg;   member
      81  uint32_t seg;   member
      91  #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.…
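Note: the repeated `seg` member indexes the third level of amdgpu's register-offset table: (hardware IP, instance, register segment) selects a base, and the macro adds the per-register offset on top. A toy model (the table dimensions are assumptions):

#include <stdint.h>

static uint32_t reg_offset[8][4][16];    /* [hwip][inst][seg] */

static uint32_t soc15_reg(unsigned int hwip, unsigned int inst,
                          unsigned int seg, uint32_t reg)
{
    return reg_offset[hwip][inst][seg] + reg;
}

int main(void)
{
    reg_offset[1][0][2] = 0x1000;        /* pretend base for one IP block */
    return soc15_reg(1, 0, 2, 0x1c) == 0x101c ? 0 : 1;
}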