Searched refs:cap (Results 1–25 of 676), sorted by relevance

/drivers/media/platform/rockchip/rkisp1/
rkisp1-capture.c
63 void (*config)(struct rkisp1_capture *cap);
64 void (*stop)(struct rkisp1_capture *cap);
65 void (*enable)(struct rkisp1_capture *cap);
66 void (*disable)(struct rkisp1_capture *cap);
67 void (*set_data_path)(struct rkisp1_capture *cap);
68 bool (*is_stopped)(struct rkisp1_capture *cap);
367 int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap, in rkisp1_cap_enum_mbus_codes() argument
370 const struct rkisp1_capture_fmt_cfg *fmts = cap->config->fmts; in rkisp1_cap_enum_mbus_codes()
378 for (i = 0; i < cap->config->fmt_size; i++) { in rkisp1_cap_enum_mbus_codes()
395 static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap) in rkisp1_mi_config_ctrl() argument
[all …]
/drivers/staging/greybus/
authentication.c
47 struct gb_cap *cap = container_of(kref, struct gb_cap, kref); in cap_kref_release() local
49 kfree(cap); in cap_kref_release()
57 static void put_cap(struct gb_cap *cap) in put_cap() argument
59 kref_put(&cap->kref, cap_kref_release); in put_cap()
65 struct gb_cap *cap; in get_cap() local
69 list_for_each_entry(cap, &cap_list, node) { in get_cap()
70 if (&cap->cdev == cdev) { in get_cap()
71 kref_get(&cap->kref); in get_cap()
76 cap = NULL; in get_cap()
81 return cap; in get_cap()
[all …]
/drivers/net/ethernet/samsung/sxgbe/
sxgbe_reg.h
409 #define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1) argument
410 #define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4) argument
411 #define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5) argument
412 #define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6) argument
413 #define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7) argument
414 #define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8) argument
415 #define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9) argument
416 #define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12) argument
417 #define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13) argument
418 #define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14) argument
[all …]
/drivers/vfio/pci/
vfio_pci_zdev.c
25 struct vfio_device_info_cap_zpci_base cap = { in zpci_base_cap() local
38 return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); in zpci_base_cap()
46 struct vfio_device_info_cap_zpci_group cap = { in zpci_group_cap() local
60 return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); in zpci_group_cap()
68 struct vfio_device_info_cap_zpci_util *cap; in zpci_util_cap() local
69 int cap_size = sizeof(*cap) + CLP_UTIL_STR_LEN; in zpci_util_cap()
72 cap = kmalloc(cap_size, GFP_KERNEL); in zpci_util_cap()
73 if (!cap) in zpci_util_cap()
76 cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL; in zpci_util_cap()
77 cap->header.version = 1; in zpci_util_cap()
[all …]
/drivers/iommu/intel/
cap_audit.h
67 #define DO_CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \ argument
69 if (cap##_##feature(a) != cap##_##feature(b)) { \
70 intel_iommu_##cap##_sanity &= ~(MASK); \
75 #define CHECK_FEATURE_MISMATCH(a, b, cap, feature, MASK) \ argument
76 DO_CHECK_FEATURE_MISMATCH((a)->cap, (b)->cap, cap, feature, MASK)
78 #define CHECK_FEATURE_MISMATCH_HOTPLUG(b, cap, feature, MASK) \ argument
80 if (cap##_##feature(intel_iommu_##cap##_sanity)) \
81 DO_CHECK_FEATURE_MISMATCH(intel_iommu_##cap##_sanity, \
82 (b)->cap, cap, feature, MASK); \
85 #define MINIMAL_FEATURE_IOMMU(iommu, cap, MASK) \ argument
[all …]
cap_audit.c
22 CHECK_FEATURE_MISMATCH(a, b, cap, pi_support, CAP_PI_MASK); in check_irq_capabilities()
29 MINIMAL_FEATURE_IOMMU(b, cap, CAP_MAMV_MASK); in check_dmar_capabilities()
30 MINIMAL_FEATURE_IOMMU(b, cap, CAP_NFR_MASK); in check_dmar_capabilities()
31 MINIMAL_FEATURE_IOMMU(b, cap, CAP_SLLPS_MASK); in check_dmar_capabilities()
32 MINIMAL_FEATURE_IOMMU(b, cap, CAP_FRO_MASK); in check_dmar_capabilities()
33 MINIMAL_FEATURE_IOMMU(b, cap, CAP_MGAW_MASK); in check_dmar_capabilities()
34 MINIMAL_FEATURE_IOMMU(b, cap, CAP_SAGAW_MASK); in check_dmar_capabilities()
35 MINIMAL_FEATURE_IOMMU(b, cap, CAP_NDOMS_MASK); in check_dmar_capabilities()
40 CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK); in check_dmar_capabilities()
41 CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK); in check_dmar_capabilities()
[all …]
/drivers/pci/pcie/
dpc.c
46 u16 *cap; in pci_save_dpc_state() local
55 cap = (u16 *)&save_state->cap.data[0]; in pci_save_dpc_state()
56 pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap); in pci_save_dpc_state()
62 u16 *cap; in pci_restore_dpc_state() local
71 cap = (u16 *)&save_state->cap.data[0]; in pci_restore_dpc_state()
72 pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap); in pci_restore_dpc_state()
130 u16 cap = pdev->dpc_cap, status; in dpc_wait_rp_inactive() local
132 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); in dpc_wait_rp_inactive()
136 pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); in dpc_wait_rp_inactive()
148 u16 cap; in dpc_reset_link() local
[all …]
ptm.c
42 u32 cap; in pci_ptm_init() local
55 pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap); in pci_ptm_init()
56 dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; in pci_ptm_init()
70 } else if (cap & PCI_PTM_CAP_ROOT) { in pci_ptm_init()
91 u32 *cap; in pci_save_ptm_state() local
100 cap = (u32 *)&save_state->cap.data[0]; in pci_save_ptm_state()
101 pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap); in pci_save_ptm_state()
108 u32 *cap; in pci_restore_ptm_state() local
117 cap = (u32 *)&save_state->cap.data[0]; in pci_restore_ptm_state()
118 pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap); in pci_restore_ptm_state()
/drivers/thunderbolt/
lc.c
62 int cap, ret; in tb_lc_reset_port() local
68 cap = find_port_lc_cap(port); in tb_lc_reset_port()
69 if (cap < 0) in tb_lc_reset_port()
70 return cap; in tb_lc_reset_port()
72 ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); in tb_lc_reset_port()
78 ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); in tb_lc_reset_port()
84 ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); in tb_lc_reset_port()
90 return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); in tb_lc_reset_port()
98 int cap, ret; in tb_lc_set_port_configured() local
103 cap = find_port_lc_cap(port); in tb_lc_set_port_configured()
[all …]
cap.c
87 static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) in __tb_port_find_cap() argument
103 if (header.basic.cap == cap) in __tb_port_find_cap()
119 int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) in tb_port_find_cap() argument
127 ret = __tb_port_find_cap(port, cap); in tb_port_find_cap()
157 switch (header.basic.cap) { in tb_switch_next_cap()
171 header.basic.cap, offset); in tb_switch_next_cap()
188 int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) in tb_switch_find_cap() argument
204 if (header.basic.cap == cap) in tb_switch_find_cap()
237 if (header.extended_short.cap == TB_SWITCH_CAP_VSE && in tb_switch_find_vse_cap()
/drivers/media/platform/qcom/venus/
hfi_parser.c
14 typedef void (*func)(struct hfi_plat_caps *cap, const void *data,
19 struct hfi_plat_caps *caps = core->caps, *cap; in init_codecs() local
26 cap = &caps[core->codecs_count++]; in init_codecs()
27 cap->codec = BIT(bit); in init_codecs()
28 cap->domain = VIDC_SESSION_TYPE_DEC; in init_codecs()
29 cap->valid = false; in init_codecs()
33 cap = &caps[core->codecs_count++]; in init_codecs()
34 cap->codec = BIT(bit); in init_codecs()
35 cap->domain = VIDC_SESSION_TYPE_ENC; in init_codecs()
36 cap->valid = false; in init_codecs()
[all …]
/drivers/net/wireless/ath/wcn36xx/
firmware.c
82 enum wcn36xx_firmware_feat_caps cap) in wcn36xx_firmware_set_feat_caps() argument
86 if (cap < 0 || cap > 127) { in wcn36xx_firmware_set_feat_caps()
87 wcn36xx_warn("error cap idx %d\n", cap); in wcn36xx_firmware_set_feat_caps()
91 arr_idx = cap / 32; in wcn36xx_firmware_set_feat_caps()
92 bit_idx = cap % 32; in wcn36xx_firmware_set_feat_caps()
97 enum wcn36xx_firmware_feat_caps cap) in wcn36xx_firmware_get_feat_caps() argument
101 if (cap < 0 || cap > 127) { in wcn36xx_firmware_get_feat_caps()
102 wcn36xx_warn("error cap idx %d\n", cap); in wcn36xx_firmware_get_feat_caps()
106 arr_idx = cap / 32; in wcn36xx_firmware_get_feat_caps()
107 bit_idx = cap % 32; in wcn36xx_firmware_get_feat_caps()
[all …]
/drivers/infiniband/core/
uverbs_std_types_qp.c
65 struct ib_uverbs_qp_cap *cap, bool req) in set_caps() argument
68 attr->cap.max_send_wr = cap->max_send_wr; in set_caps()
69 attr->cap.max_recv_wr = cap->max_recv_wr; in set_caps()
70 attr->cap.max_send_sge = cap->max_send_sge; in set_caps()
71 attr->cap.max_recv_sge = cap->max_recv_sge; in set_caps()
72 attr->cap.max_inline_data = cap->max_inline_data; in set_caps()
74 cap->max_send_wr = attr->cap.max_send_wr; in set_caps()
75 cap->max_recv_wr = attr->cap.max_recv_wr; in set_caps()
76 cap->max_send_sge = attr->cap.max_send_sge; in set_caps()
77 cap->max_recv_sge = attr->cap.max_recv_sge; in set_caps()
[all …]
/drivers/char/tpm/
tpm-sysfs.c
85 cap_t cap; in pcrs_show() local
94 if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap, in pcrs_show()
96 sizeof(cap.num_pcrs))) { in pcrs_show()
101 num_pcrs = be32_to_cpu(cap.num_pcrs); in pcrs_show()
122 cap_t cap; in enabled_show() local
127 if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, in enabled_show()
129 sizeof(cap.perm_flags))) in enabled_show()
132 rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); in enabled_show()
144 cap_t cap; in active_show() local
149 if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, in active_show()
[all …]
/drivers/infiniband/hw/hfi1/
common.h
45 #define HFI1_CAP_KSET(cap) ({ hfi1_cap_mask |= HFI1_CAP_##cap; hfi1_cap_mask; }) argument
46 #define HFI1_CAP_KCLEAR(cap) \ argument
48 hfi1_cap_mask &= ~HFI1_CAP_##cap; \
51 #define HFI1_CAP_USET(cap) \ argument
53 hfi1_cap_mask |= (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
56 #define HFI1_CAP_UCLEAR(cap) \ argument
58 hfi1_cap_mask &= ~(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
61 #define HFI1_CAP_SET(cap) \ argument
63 hfi1_cap_mask |= (HFI1_CAP_##cap | (HFI1_CAP_##cap << \
67 #define HFI1_CAP_CLEAR(cap) \ argument
[all …]
/drivers/platform/surface/aggregator/
ssh_parser.h
27 size_t cap; member
39 static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap) in sshp_buf_init() argument
43 buf->cap = cap; in sshp_buf_init()
57 static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags) in sshp_buf_alloc() argument
61 ptr = kzalloc(cap, flags); in sshp_buf_alloc()
65 sshp_buf_init(buf, ptr, cap); in sshp_buf_alloc()
82 buf->cap = 0; in sshp_buf_free()
116 n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len); in sshp_buf_read_from_fifo()
/drivers/net/ethernet/netronome/nfp/bpf/
main.c
29 return nn->cap & NFP_NET_CFG_CTRL_BPF && in nfp_net_ebpf_capable()
209 struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value; in nfp_bpf_parse_cap_adjust_head() local
212 if (length < sizeof(*cap)) { in nfp_bpf_parse_cap_adjust_head()
217 bpf->adjust_head.flags = readl(&cap->flags); in nfp_bpf_parse_cap_adjust_head()
218 bpf->adjust_head.off_min = readl(&cap->off_min); in nfp_bpf_parse_cap_adjust_head()
219 bpf->adjust_head.off_max = readl(&cap->off_max); in nfp_bpf_parse_cap_adjust_head()
220 bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub); in nfp_bpf_parse_cap_adjust_head()
221 bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add); in nfp_bpf_parse_cap_adjust_head()
240 struct nfp_bpf_cap_tlv_func __iomem *cap = value; in nfp_bpf_parse_cap_func() local
242 if (length < sizeof(*cap)) { in nfp_bpf_parse_cap_func()
[all …]
/drivers/usb/typec/
pd.c
397 static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position) argument
414 dev_warn(&cap->dev, "Unknown APDO type. PDO 0x%08x\n", pdo); in add_pdo()
419 if (is_source(cap->role)) in add_pdo()
426 if (is_source(cap->role)) in add_pdo()
434 p->dev.parent = &cap->dev; in add_pdo()
485 struct usb_power_delivery_capabilities *cap; in usb_power_delivery_register_capabilities() local
489 cap = kzalloc(sizeof(*cap), GFP_KERNEL); in usb_power_delivery_register_capabilities()
490 if (!cap) in usb_power_delivery_register_capabilities()
493 cap->pd = pd; in usb_power_delivery_register_capabilities()
494 cap->role = desc->role; in usb_power_delivery_register_capabilities()
[all …]
/drivers/platform/x86/intel/uncore-frequency/
uncore-frequency.c
42 u64 cap; in uncore_read_control_freq() local
48 ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); in uncore_read_control_freq()
52 *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; in uncore_read_control_freq()
53 *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; in uncore_read_control_freq()
62 u64 cap; in uncore_write_control_freq() local
71 ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); in uncore_write_control_freq()
76 cap &= ~0x7F; in uncore_write_control_freq()
77 cap |= input; in uncore_write_control_freq()
79 cap &= ~GENMASK(14, 8); in uncore_write_control_freq()
80 cap |= (input << 8); in uncore_write_control_freq()
[all …]
/drivers/usb/typec/ucsi/
ucsi.c
350 override = !!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_OVERRIDE); in ucsi_register_altmode()
443 max_altmodes = con->ucsi->cap.num_alt_modes; in ucsi_register_altmodes_nvidia()
514 if (!(con->ucsi->cap.features & UCSI_CAP_ALT_MODE_DETAILS)) in ucsi_register_altmodes()
524 max_altmodes = con->ucsi->cap.num_alt_modes; in ucsi_register_altmodes()
679 struct usb_power_delivery_desc desc = { con->ucsi->cap.pd_version }; in ucsi_register_partner_pdos()
681 struct usb_power_delivery_capabilities *cap; in ucsi_register_partner_pdos() local
697 cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps); in ucsi_register_partner_pdos()
698 if (IS_ERR(cap)) in ucsi_register_partner_pdos()
699 return PTR_ERR(cap); in ucsi_register_partner_pdos()
701 con->partner_source_caps = cap; in ucsi_register_partner_pdos()
[all …]
/drivers/infiniband/sw/rxe/
rxe_qp.c
18 static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap, in rxe_qp_chk_cap() argument
21 if (cap->max_send_wr > rxe->attr.max_qp_wr) { in rxe_qp_chk_cap()
23 cap->max_send_wr, rxe->attr.max_qp_wr); in rxe_qp_chk_cap()
27 if (cap->max_send_sge > rxe->attr.max_send_sge) { in rxe_qp_chk_cap()
29 cap->max_send_sge, rxe->attr.max_send_sge); in rxe_qp_chk_cap()
34 if (cap->max_recv_wr > rxe->attr.max_qp_wr) { in rxe_qp_chk_cap()
36 cap->max_recv_wr, rxe->attr.max_qp_wr); in rxe_qp_chk_cap()
40 if (cap->max_recv_sge > rxe->attr.max_recv_sge) { in rxe_qp_chk_cap()
42 cap->max_recv_sge, rxe->attr.max_recv_sge); in rxe_qp_chk_cap()
47 if (cap->max_inline_data > rxe->max_inline_data) { in rxe_qp_chk_cap()
[all …]
/drivers/pci/
pci.c
429 u8 pos, int cap, int *ttl) in __pci_find_next_cap_ttl() argument
445 if (id == cap) in __pci_find_next_cap_ttl()
453 u8 pos, int cap) in __pci_find_next_cap() argument
457 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); in __pci_find_next_cap()
460 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) in pci_find_next_capability() argument
463 pos + PCI_CAP_LIST_NEXT, cap); in pci_find_next_capability()
506 u8 pci_find_capability(struct pci_dev *dev, int cap) in pci_find_capability() argument
512 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); in pci_find_capability()
531 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) in pci_bus_find_capability() argument
539 pos = __pci_find_next_cap(bus, devfn, pos, cap); in pci_bus_find_capability()
[all …]
/drivers/gpu/drm/msm/disp/dpu1/
dpu_vbif.c
46 if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) { in _dpu_vbif_wait_for_xin_halt()
51 timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout); in _dpu_vbif_wait_for_xin_halt()
89 if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM))) in _dpu_vbif_apply_dynamic_ot_limit()
100 tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl : in _dpu_vbif_apply_dynamic_ot_limit()
101 &vbif->cap->dynamic_ot_wr_tbl; in _dpu_vbif_apply_dynamic_ot_limit()
128 if (!vbif || !vbif->cap) { in _dpu_vbif_get_ot_limit()
133 if (vbif->cap->default_ot_wr_limit && !params->rd) in _dpu_vbif_get_ot_limit()
134 ot_lim = vbif->cap->default_ot_wr_limit; in _dpu_vbif_get_ot_limit()
135 else if (vbif->cap->default_ot_rd_limit && params->rd) in _dpu_vbif_get_ot_limit()
136 ot_lim = vbif->cap->default_ot_rd_limit; in _dpu_vbif_get_ot_limit()
[all …]
/drivers/net/arcnet/
capmode.c
77 memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto)); in rx()
78 memcpy(pktbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto) + sizeof(int), in rx()
79 pkthdrbuf + ARC_HDR_SIZE + sizeof(pkt->soft.cap.proto), in rx()
80 sizeof(struct archdr) - ARC_HDR_SIZE - sizeof(pkt->soft.cap.proto)); in rx()
107 *((int *)&pkt->soft.cap.cookie[0])); in build_header()
148 *((int *)&pkt->soft.cap.cookie[0])); in prepare_tx()
171 lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, in prepare_tx()
172 sizeof(pkt->soft.cap.proto)); in prepare_tx()
178 ((unsigned char *)&pkt->soft.cap.mes), length - 1); in prepare_tx()
212 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ in ack_tx()
[all …]
/drivers/vdpa/ifcvf/
ifcvf_base.c
33 struct virtio_pci_cap *cap) in get_cap_addr() argument
38 length = le32_to_cpu(cap->length); in get_cap_addr()
39 offset = le32_to_cpu(cap->offset); in get_cap_addr()
40 bar = cap->bar; in get_cap_addr()
105 struct virtio_pci_cap cap; in ifcvf_init_hw() local
119 ret = ifcvf_read_config_range(pdev, (u32 *)&cap, in ifcvf_init_hw()
120 sizeof(cap), pos); in ifcvf_init_hw()
127 if (cap.cap_vndr != PCI_CAP_ID_VNDR) in ifcvf_init_hw()
130 switch (cap.cfg_type) { in ifcvf_init_hw()
132 hw->common_cfg = get_cap_addr(hw, &cap); in ifcvf_init_hw()
[all …]
