/kernel/linux/linux-5.10/net/openvswitch/
D | vport-netdev.c
      31: struct vport *vport;    [in netdev_port_receive(), local]
      33: vport = ovs_netdev_get_vport(skb->dev);    [in netdev_port_receive()]
      34: if (unlikely(!vport))    [in netdev_port_receive()]
      51: ovs_vport_receive(vport, skb, skb_tunnel_info(skb));    [in netdev_port_receive()]
      71: struct vport *local;    [in get_dpdev()]
      77: struct vport *ovs_netdev_link(struct vport *vport, const char *name)    [in ovs_netdev_link(), argument]
      81: vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);    [in ovs_netdev_link()]
      82: if (!vport->dev) {    [in ovs_netdev_link()]
      87: if (vport->dev->flags & IFF_LOOPBACK ||    [in ovs_netdev_link()]
      88: (vport->dev->type != ARPHRD_ETHER &&    [in ovs_netdev_link()]
      [all …]

D | vport.c
      95: struct vport *ovs_vport_locate(const struct net *net, const char *name)    [in ovs_vport_locate()]
      98: struct vport *vport;    [in ovs_vport_locate(), local]
      100: hlist_for_each_entry_rcu(vport, bucket, hash_node,    [in ovs_vport_locate()]
      102: if (!strcmp(name, ovs_vport_name(vport)) &&    [in ovs_vport_locate()]
      103: net_eq(ovs_dp_get_net(vport->dp), net))    [in ovs_vport_locate()]
      104: return vport;    [in ovs_vport_locate()]
      120: struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,    [in ovs_vport_alloc()]
      123: struct vport *vport;    [in ovs_vport_alloc(), local]
      126: alloc_size = sizeof(struct vport);    [in ovs_vport_alloc()]
      132: vport = kzalloc(alloc_size, GFP_KERNEL);    [in ovs_vport_alloc()]
      [all …]

D | vport.h
      20: struct vport;
      28: struct vport *ovs_vport_add(const struct vport_parms *);
      29: void ovs_vport_del(struct vport *);
      31: struct vport *ovs_vport_locate(const struct net *net, const char *name);
      33: void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
      35: int ovs_vport_set_options(struct vport *, struct nlattr *options);
      36: int ovs_vport_get_options(const struct vport *, struct sk_buff *);
      38: int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
      39: int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
      40: u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
      [all …]

D | vport-geneve.c
      36: static inline struct geneve_port *geneve_vport(const struct vport *vport)    [in geneve_vport(), argument]
      38: return vport_priv(vport);    [in geneve_vport()]
      41: static int geneve_get_options(const struct vport *vport,    [in geneve_get_options(), argument]
      44: struct geneve_port *geneve_port = geneve_vport(vport);    [in geneve_get_options()]
      51: static struct vport *geneve_tnl_create(const struct vport_parms *parms)    [in geneve_tnl_create()]
      57: struct vport *vport;    [in geneve_tnl_create(), local]
      76: vport = ovs_vport_alloc(sizeof(struct geneve_port),    [in geneve_tnl_create()]
      78: if (IS_ERR(vport))    [in geneve_tnl_create()]
      79: return vport;    [in geneve_tnl_create()]
      81: geneve_port = geneve_vport(vport);    [in geneve_tnl_create()]
      [all …]

D | vport-internal_dev.c
      22: struct vport *vport;    [member]
      40: err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);    [in internal_dev_xmit()]
      81: struct vport *vport = ovs_internal_dev_get_vport(dev);    [in internal_dev_destructor(), local]
      83: ovs_vport_free(vport);    [in internal_dev_destructor()]
      138: static struct vport *internal_dev_create(const struct vport_parms *parms)    [in internal_dev_create()]
      140: struct vport *vport;    [in internal_dev_create(), local]
      145: vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);    [in internal_dev_create()]
      146: if (IS_ERR(vport)) {    [in internal_dev_create()]
      147: err = PTR_ERR(vport);    [in internal_dev_create()]
      153: vport->dev = dev;    [in internal_dev_create()]
      [all …]

D | vport-vxlan.c
      22: static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)    [in vxlan_get_options(), argument]
      24: struct vxlan_dev *vxlan = netdev_priv(vport->dev);    [in vxlan_get_options()]
      51: static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,    [in vxlan_configure_exts(), argument]
      71: static struct vport *vxlan_tnl_create(const struct vport_parms *parms)    [in vxlan_tnl_create()]
      76: struct vport *vport;    [in vxlan_tnl_create(), local]
      100: vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);    [in vxlan_tnl_create()]
      101: if (IS_ERR(vport))    [in vxlan_tnl_create()]
      102: return vport;    [in vxlan_tnl_create()]
      106: err = vxlan_configure_exts(vport, a, &conf);    [in vxlan_tnl_create()]
      108: ovs_vport_free(vport);    [in vxlan_tnl_create()]
      [all …]

D | vport-gre.c
      39: static struct vport *gre_tnl_create(const struct vport_parms *parms)    [in gre_tnl_create()]
      43: struct vport *vport;    [in gre_tnl_create(), local]
      46: vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);    [in gre_tnl_create()]
      47: if (IS_ERR(vport))    [in gre_tnl_create()]
      48: return vport;    [in gre_tnl_create()]
      54: ovs_vport_free(vport);    [in gre_tnl_create()]
      62: ovs_vport_free(vport);    [in gre_tnl_create()]
      67: return vport;    [in gre_tnl_create()]
      70: static struct vport *gre_create(const struct vport_parms *parms)    [in gre_create()]
      72: struct vport *vport;    [in gre_create(), local]
      [all …]

D | dp_notify.c
      14: static void dp_detach_port_notify(struct vport *vport)    [in dp_detach_port_notify(), argument]
      19: dp = vport->dp;    [in dp_detach_port_notify()]
      20: notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp),    [in dp_detach_port_notify()]
      22: ovs_dp_detach_port(vport);    [in dp_detach_port_notify()]
      44: struct vport *vport;    [in ovs_dp_notify_wq(), local]
      47: hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {    [in ovs_dp_notify_wq()]
      48: if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)    [in ovs_dp_notify_wq()]
      51: if (!(netif_is_ovs_port(vport->dev)))    [in ovs_dp_notify_wq()]
      52: dp_detach_port_notify(vport);    [in ovs_dp_notify_wq()]
      64: struct vport *vport = NULL;    [in dp_device_event(), local]
      [all …]

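Every tunnel port in the openvswitch excerpts above follows the same allocation pattern: ovs_vport_alloc() reserves priv_size extra bytes directly behind struct vport, and vport_priv() returns that per-port area (geneve_vport() is just a typed wrapper around it). Below is a minimal sketch of that pattern; struct my_port and my_tnl_create() are hypothetical names, not code from the kernel tree, and the ops argument stands in for a module's own static vport_ops (such as &ovs_gre_vport_ops above).

    #include <linux/err.h>
    #include <linux/types.h>

    #include "vport.h"      /* ovs_vport_alloc(), vport_priv(), ovs_vport_free() */

    /* Hypothetical private state stored in the area that ovs_vport_alloc()
     * reserves behind struct vport. */
    struct my_port {
            u16 dst_port;           /* byte-order handling elided for the sketch */
    };

    static struct vport *my_tnl_create(const struct vport_parms *parms,
                                       const struct vport_ops *ops)
    {
            struct my_port *my_port;
            struct vport *vport;

            /* Ask for sizeof(struct my_port) of private space, exactly as
             * geneve_tnl_create() does for struct geneve_port above. */
            vport = ovs_vport_alloc(sizeof(struct my_port), ops, parms);
            if (IS_ERR(vport))
                    return vport;

            my_port = vport_priv(vport);    /* points into the same allocation */
            my_port->dst_port = 6081;

            return vport;
    }

On any later failure the port is released as a whole with ovs_vport_free(), which is why the gre and vxlan creators above call it on their error paths.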
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
D | ingress_ofld.c
      11: const struct mlx5_vport *vport)    [in esw_acl_ingress_prio_tag_enabled(), argument]
      14: mlx5_eswitch_is_vf_vport(esw, vport->vport));    [in esw_acl_ingress_prio_tag_enabled()]
      18: struct mlx5_vport *vport)    [in esw_acl_ingress_prio_tag_create(), argument]
      43: if (vport->ingress.offloads.modify_metadata_rule) {    [in esw_acl_ingress_prio_tag_create()]
      45: flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;    [in esw_acl_ingress_prio_tag_create()]
      48: vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,    [in esw_acl_ingress_prio_tag_create()]
      50: if (IS_ERR(vport->ingress.allow_rule)) {    [in esw_acl_ingress_prio_tag_create()]
      51: err = PTR_ERR(vport->ingress.allow_rule);    [in esw_acl_ingress_prio_tag_create()]
      54: vport->vport, err);    [in esw_acl_ingress_prio_tag_create()]
      55: vport->ingress.allow_rule = NULL;    [in esw_acl_ingress_prio_tag_create()]
      [all …]

D | ingress_lgcy.c
      9: static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)    [in esw_acl_ingress_lgcy_rules_destroy(), argument]
      11: if (vport->ingress.legacy.drop_rule) {    [in esw_acl_ingress_lgcy_rules_destroy()]
      12: mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);    [in esw_acl_ingress_lgcy_rules_destroy()]
      13: vport->ingress.legacy.drop_rule = NULL;    [in esw_acl_ingress_lgcy_rules_destroy()]
      15: esw_acl_ingress_allow_rule_destroy(vport);    [in esw_acl_ingress_lgcy_rules_destroy()]
      19: struct mlx5_vport *vport)    [in esw_acl_ingress_lgcy_groups_create(), argument]
      42: g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);    [in esw_acl_ingress_lgcy_groups_create()]
      46: vport->vport, err);    [in esw_acl_ingress_lgcy_groups_create()]
      49: vport->ingress.legacy.allow_untagged_spoofchk_grp = g;    [in esw_acl_ingress_lgcy_groups_create()]
      58: g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);    [in esw_acl_ingress_lgcy_groups_create()]
      [all …]

D | egress_lgcy.c
      9: static void esw_acl_egress_lgcy_rules_destroy(struct mlx5_vport *vport)    [in esw_acl_egress_lgcy_rules_destroy(), argument]
      11: esw_acl_egress_vlan_destroy(vport);    [in esw_acl_egress_lgcy_rules_destroy()]
      12: if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {    [in esw_acl_egress_lgcy_rules_destroy()]
      13: mlx5_del_flow_rules(vport->egress.legacy.drop_rule);    [in esw_acl_egress_lgcy_rules_destroy()]
      14: vport->egress.legacy.drop_rule = NULL;    [in esw_acl_egress_lgcy_rules_destroy()]
      19: struct mlx5_vport *vport)    [in esw_acl_egress_lgcy_groups_create(), argument]
      27: err = esw_acl_egress_vlan_grp_create(esw, vport);    [in esw_acl_egress_lgcy_groups_create()]
      39: drop_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);    [in esw_acl_egress_lgcy_groups_create()]
      43: vport->vport, err);    [in esw_acl_egress_lgcy_groups_create()]
      47: vport->egress.legacy.drop_grp = drop_grp;    [in esw_acl_egress_lgcy_groups_create()]
      [all …]

D | egress_ofld.c
      9: static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)    [in esw_acl_egress_ofld_fwd2vport_destroy(), argument]
      11: if (!vport->egress.offloads.fwd_rule)    [in esw_acl_egress_ofld_fwd2vport_destroy()]
      14: mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);    [in esw_acl_egress_ofld_fwd2vport_destroy()]
      15: vport->egress.offloads.fwd_rule = NULL;    [in esw_acl_egress_ofld_fwd2vport_destroy()]
      19: struct mlx5_vport *vport,    [in esw_acl_egress_ofld_fwd2vport_create(), argument]
      26: vport->vport, fwd_dest->vport.num);    [in esw_acl_egress_ofld_fwd2vport_create()]
      29: esw_acl_egress_ofld_fwd2vport_destroy(vport);    [in esw_acl_egress_ofld_fwd2vport_create()]
      33: vport->egress.offloads.fwd_rule =    [in esw_acl_egress_ofld_fwd2vport_create()]
      34: mlx5_add_flow_rules(vport->egress.acl, NULL,    [in esw_acl_egress_ofld_fwd2vport_create()]
      36: if (IS_ERR(vport->egress.offloads.fwd_rule)) {    [in esw_acl_egress_ofld_fwd2vport_create()]
      [all …]

D | helper.c
      46: struct mlx5_vport *vport,    [in esw_egress_acl_vlan_create(), argument]
      54: if (vport->egress.allowed_vlan)    [in esw_egress_acl_vlan_create()]
      68: vport->egress.allowed_vlan =    [in esw_egress_acl_vlan_create()]
      69: mlx5_add_flow_rules(vport->egress.acl, spec,    [in esw_egress_acl_vlan_create()]
      71: if (IS_ERR(vport->egress.allowed_vlan)) {    [in esw_egress_acl_vlan_create()]
      72: err = PTR_ERR(vport->egress.allowed_vlan);    [in esw_egress_acl_vlan_create()]
      75: vport->vport, err);    [in esw_egress_acl_vlan_create()]
      76: vport->egress.allowed_vlan = NULL;    [in esw_egress_acl_vlan_create()]
      83: void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport)    [in esw_acl_egress_vlan_destroy(), argument]
      85: if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {    [in esw_acl_egress_vlan_destroy()]
      [all …]

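A pattern shared by all four ACL files above is that the mlx5_flow_handle returned by mlx5_add_flow_rules() is cached in the vport structure, and teardown is guarded by IS_ERR_OR_NULL() before the rule is deleted and the cached pointer is reset to NULL. The sketch below condenses that idiom; the sketch_* helper names are illustrative, and 'acl' stands in for vport->ingress.acl or vport->egress.acl from the excerpts.

    #include <linux/err.h>
    #include <linux/mlx5/fs.h>
    #include <linux/mlx5/mlx5_ifc.h>

    /* Install a catch-all DROP rule on an ACL flow table and hand back the
     * rule handle so the caller can cache it, as the legacy ingress/egress
     * code caches drop_rule above. */
    static struct mlx5_flow_handle *sketch_drop_rule_create(struct mlx5_flow_table *acl)
    {
            struct mlx5_flow_act flow_act = {};

            flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
            /* NULL spec => match everything; a drop rule needs no destination. */
            return mlx5_add_flow_rules(acl, NULL, &flow_act, NULL, 0);
    }

    static void sketch_drop_rule_destroy(struct mlx5_flow_handle **drop_rule)
    {
            /* Same guard-and-clear sequence used by the *_rules_destroy()
             * helpers in the excerpts. */
            if (!IS_ERR_OR_NULL(*drop_rule)) {
                    mlx5_del_flow_rules(*drop_rule);
                    *drop_rule = NULL;
            }
    }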
/kernel/linux/linux-5.10/drivers/scsi/lpfc/
D | lpfc_vport.c
      53: inline void lpfc_vport_set_state(struct lpfc_vport *vport,    [in lpfc_vport_set_state(), argument]
      56: struct fc_vport *fc_vport = vport->fc_vport;    [in lpfc_vport_set_state()]
      76: vport->port_state = LPFC_VPORT_FAILED;    [in lpfc_vport_set_state()]
      79: vport->port_state = LPFC_VPORT_UNKNOWN;    [in lpfc_vport_set_state()]
      118: lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)    [in lpfc_vport_sparm(), argument]
      131: rc = lpfc_read_sparam(phba, pmb, vport->vpi);    [in lpfc_vport_sparm()]
      144: pmb->vport = vport;    [in lpfc_vport_sparm()]
      148: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_vport_sparm()]
      157: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_vport_sparm()]
      169: memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));    [in lpfc_vport_sparm()]
      [all …]

D | lpfc_els.c
      53: static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
      54: static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
      84: lpfc_els_chk_latt(struct lpfc_vport *vport)    [in lpfc_els_chk_latt(), argument]
      86: struct Scsi_Host *shost = lpfc_shost_from_vport(vport);    [in lpfc_els_chk_latt()]
      87: struct lpfc_hba *phba = vport->phba;    [in lpfc_els_chk_latt()]
      90: if (vport->port_state >= LPFC_VPORT_READY ||    [in lpfc_els_chk_latt()]
      103: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_els_chk_latt()]
      115: vport->fc_flag |= FC_ABORT_DISCOVERY;    [in lpfc_els_chk_latt()]
      119: lpfc_issue_clear_la(phba, vport);    [in lpfc_els_chk_latt()]
      153: lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,    [in lpfc_prep_els_iocb(), argument]
      [all …]

D | lpfc_ct.c
      315: lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,    [in lpfc_gen_req(), argument]
      322: struct lpfc_hba *phba = vport->phba;    [in lpfc_gen_req()]
      378: lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,    [in lpfc_gen_req()]
      382: vport->port_state);    [in lpfc_gen_req()]
      385: geniocb->vport = vport;    [in lpfc_gen_req()]
      410: lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,    [in lpfc_ct_cmd(), argument]
      416: struct lpfc_hba *phba = vport->phba;    [in lpfc_ct_cmd()]
      435: status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,    [in lpfc_ct_cmd()]
      461: lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)    [in lpfc_prep_node_fc4type(), argument]
      465: if ((vport->port_type != LPFC_NPIV_PORT) ||    [in lpfc_prep_node_fc4type()]
      [all …]

D | lpfc_nportdisc.c
      52: lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,    [in lpfc_check_adisc(), argument]
      73: lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,    [in lpfc_check_sparm(), argument]
      76: volatile struct serv_parm *hsp = &vport->fc_sparam;    [in lpfc_check_sparm()]
      155: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_check_sparm()]
      221: lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,    [in lpfc_els_abort()]
      316: rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,    [in lpfc_defer_pt2pt_acc()]
      344: struct lpfc_vport *vport = pmb->vport;    [in lpfc_defer_acc_rsp(), local]
      364: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_defer_acc_rsp()]
      371: rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);    [in lpfc_defer_acc_rsp()]
      373: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_defer_acc_rsp()]
      [all …]

D | lpfc_hbadisc.c
      71: static void lpfc_disc_flush_list(struct lpfc_vport *vport);
      96: lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,    [in lpfc_terminate_rport_io()]
      101: lpfc_sli_abort_iocb(ndlp->vport,    [in lpfc_terminate_rport_io()]
      115: struct lpfc_vport *vport;    [in lpfc_dev_loss_tmo_callbk(), local]
      128: vport = ndlp->vport;    [in lpfc_dev_loss_tmo_callbk()]
      129: phba = vport->phba;    [in lpfc_dev_loss_tmo_callbk()]
      131: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,    [in lpfc_dev_loss_tmo_callbk()]
      135: lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,    [in lpfc_dev_loss_tmo_callbk()]
      143: if (vport->load_flag & FC_UNLOADING) {    [in lpfc_dev_loss_tmo_callbk()]
      159: lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,    [in lpfc_dev_loss_tmo_callbk()]
      [all …]

D | lpfc_nvme.c
      258: struct lpfc_vport *vport;    [in lpfc_nvme_create_queue(), local]
      266: vport = lport->vport;    [in lpfc_nvme_create_queue()]
      287: lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,    [in lpfc_nvme_create_queue()]
      315: struct lpfc_vport *vport;    [in lpfc_nvme_delete_queue(), local]
      321: vport = lport->vport;    [in lpfc_nvme_delete_queue()]
      323: lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,    [in lpfc_nvme_delete_queue()]
      334: lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,    [in lpfc_nvme_localport_delete()]
      339: if (lport->vport->localport)    [in lpfc_nvme_localport_delete()]
      358: struct lpfc_vport *vport;    [in lpfc_nvme_remoteport_delete(), local]
      365: vport = ndlp->vport;    [in lpfc_nvme_remoteport_delete()]
      [all …]

D | lpfc_logmsg.h
      54: #define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \    [argument]
      55: { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
      56: dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
      57: fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
      70: #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \    [argument]
      72: { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
      74: lpfc_dmp_dbg((vport)->phba); \
      75: dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
      76: fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
      77: } else if (!(vport)->cfg_log_verbose) \
      [all …]

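The lpfc_logmsg.h excerpt shows that lpfc_printf_vlog() gates output on the per-vport cfg_log_verbose mask (or on a severe enough printk level) and prefixes every message with the board number and VPI. A caller-side sketch, matching the many lpfc_printf_vlog() sites in the files above; the function name and message text are illustrative, and the usual lpfc driver headers (lpfc.h, lpfc_logmsg.h and their prerequisites) are assumed to be included as the real .c files do:

    /* Emit a discovery-class message for this vport.  The macro prints it as
     * "<brd_no>:(<vpi>): ..." and only when LOG_DISCOVERY is enabled in
     * vport->cfg_log_verbose or the level is severe enough. */
    static void sketch_log_vport_state(struct lpfc_vport *vport)
    {
            lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                             "vport state x%x on vpi x%x\n",
                             vport->port_state, vport->vpi);
    }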
/kernel/linux/linux-5.10/drivers/net/ethernet/hisilicon/hns3/hns3pf/
D | hclge_mbx.c
      30: static int hclge_gen_resp_to_vf(struct hclge_vport *vport,    [in hclge_gen_resp_to_vf(), argument]
      35: struct hclge_dev *hdev = vport->back;    [in hclge_gen_resp_to_vf()]
      86: static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,    [in hclge_send_mbx_msg(), argument]
      90: struct hclge_dev *hdev = vport->back;    [in hclge_send_mbx_msg()]
      115: int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)    [in hclge_inform_reset_assert_to_vf(), argument]
      117: struct hclge_dev *hdev = vport->back;    [in hclge_inform_reset_assert_to_vf()]
      124: dest_vfid = (u8)vport->vport_id;    [in hclge_inform_reset_assert_to_vf()]
      136: return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),    [in hclge_inform_reset_assert_to_vf()]
      166: struct hclge_vport *vport)    [in hclge_get_ring_chain_from_mbx(), argument]
      169: struct hclge_dev *hdev = vport->back;    [in hclge_get_ring_chain_from_mbx()]
      [all …]

D | hclge_main.c
      545: struct hclge_vport *vport = hclge_get_vport(handle);    [in hclge_tqps_update_stats(), local]
      546: struct hclge_dev *hdev = vport->back;    [in hclge_tqps_update_stats()]
      683: handle = &hdev->vport[0].nic;    [in hclge_update_stats_for_all()]
      702: struct hclge_vport *vport = hclge_get_vport(handle);    [in hclge_update_stats(), local]
      703: struct hclge_dev *hdev = vport->back;    [in hclge_update_stats()]
      731: struct hclge_vport *vport = hclge_get_vport(handle);    [in hclge_get_sset_count(), local]
      732: struct hclge_dev *hdev = vport->back;    [in hclge_get_sset_count()]
      807: struct hclge_vport *vport = hclge_get_vport(handle);    [in hclge_get_stats(), local]
      808: struct hclge_dev *hdev = vport->back;    [in hclge_get_stats()]
      819: struct hclge_vport *vport = hclge_get_vport(handle);    [in hclge_get_mac_stat(), local]
      [all …]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/
D | eswitch.c
      58: u16 vport;    [member]
      110: static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,    [in arm_vport_context_events_cmd(), argument]
      119: MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);    [in arm_vport_context_events_cmd()]
      140: int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,    [in mlx5_eswitch_modify_esw_vport_context(), argument]
      145: MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);    [in mlx5_eswitch_modify_esw_vport_context()]
      150: static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,    [in modify_esw_vport_cvlan(), argument]
      160: vport, vlan, qos, set_flags);    [in modify_esw_vport_cvlan()]
      182: return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);    [in modify_esw_vport_cvlan()]
      187: __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,    [in __esw_fdb_set_vport_rule(), argument]
      228: dest.vport.num = vport;    [in __esw_fdb_set_vport_rule()]
      [all …]

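arm_vport_context_events_cmd() and mlx5_eswitch_modify_esw_vport_context() above both build their firmware commands with MLX5_SET() and address a specific vport through the vport_number field. A stripped-down sketch of that command-building pattern follows; the sketch_* function name is illustrative, and any IFC field beyond those visible above (in particular other_vport) is an assumption about the mlx5 layout rather than something confirmed by the excerpts.

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/mlx5_ifc.h>

    /* Issue MODIFY_NIC_VPORT_CONTEXT for a given vport number.  Real callers
     * also fill field_select and the nic_vport_context payload before
     * executing the command. */
    static int sketch_modify_nic_vport(struct mlx5_core_dev *dev, u16 vport)
    {
            u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};

            MLX5_SET(modify_nic_vport_context_in, in, opcode,
                     MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
            MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
            if (vport)      /* assumed: non-zero vport means "another function's vport" */
                    MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

            return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
    }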
/kernel/linux/linux-5.10/sound/isa/
D | sc6000.c
      186: static int sc6000_wait_data(char __iomem *vport)    [in sc6000_wait_data(), argument]
      192: val = ioread8(vport + DSP_DATAVAIL);    [in sc6000_wait_data()]
      201: static int sc6000_read(char __iomem *vport)    [in sc6000_read(), argument]
      203: if (sc6000_wait_data(vport))    [in sc6000_read()]
      206: return ioread8(vport + DSP_READ);    [in sc6000_read()]
      210: static int sc6000_write(char __iomem *vport, int cmd)    [in sc6000_write(), argument]
      216: val = ioread8(vport + DSP_STATUS);    [in sc6000_write()]
      221: iowrite8(cmd, vport + DSP_COMMAND);    [in sc6000_write()]
      232: static int sc6000_dsp_get_answer(char __iomem *vport, int command,    [in sc6000_dsp_get_answer(), argument]
      237: if (sc6000_write(vport, command)) {    [in sc6000_dsp_get_answer()]
      [all …]

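Here vport is not a virtual port at all but an ioremapped register window: the helpers implement a simple command/response protocol, polling DSP_STATUS before writing a command and DSP_DATAVAIL before reading a byte back. A caller-side sketch that would sit next to those static helpers inside sc6000.c (the sc6000_cmd_answer name and the -EIO choice are mine, not the driver's):

    /* Send one DSP command and read back a single-byte answer, built only
     * from the sc6000_write()/sc6000_read() helpers shown above. */
    static int sc6000_cmd_answer(char __iomem *vport, int cmd)
    {
            if (sc6000_write(vport, cmd))
                    return -EIO;            /* command was never accepted */

            /* sc6000_read() polls DSP_DATAVAIL and returns the byte from
             * DSP_READ, or its own error value if nothing arrives. */
            return sc6000_read(vport);
    }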
/kernel/linux/linux-5.10/Documentation/scsi/
D | scsi_fc_transport.rst
      57: The FC transport is now recognizing a new object - a vport. A vport is
      60: be specified for the vport, with FCP_Initiator being the primary role
      64: to create vports. The transport will create the vport object within the
      67: on the vport, resulting in a unique <H,C,T,L> namespace for the vport.
      79: up to an administrative entity controlling the vport. For example,
      81: utility would be responsible for creating wwpn/wwnn's for the vport,
      91: transport creates the vport object and places it under the scsi_host
      93: a new scsi_host for the vport and link its object under the vport.
      96: allow the parent of the vport to be something other than the scsi_host.
      98: device tree. If the vport's parent is not the physical port's scsi_host,
      [all …]

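On the LLDD side, the documentation's model is wired up through vport callbacks in the driver's fc_function_template, and the driver reports vport state changes back to the transport (the lpfc_vport_set_state() excerpt above is exactly that reporting path). A hedged sketch, assuming the standard scsi_transport_fc.h hooks; the my_* names are illustrative and only the callback signatures and fc_vport_set_state() are taken from the transport header:

    #include <scsi/scsi_transport_fc.h>

    /* Hypothetical LLDD hooks showing how a driver plugs NPIV vports into
     * the FC transport. */
    static int my_vport_create(struct fc_vport *fc_vport, bool disable)
    {
            /* Allocate HBA resources for the new N_Port, instantiate the
             * child Scsi_Host, then tell the transport how it came up. */
            fc_vport_set_state(fc_vport,
                               disable ? FC_VPORT_DISABLED : FC_VPORT_ACTIVE);
            return 0;
    }

    static int my_vport_delete(struct fc_vport *fc_vport)
    {
            /* Log the vport out of the fabric and release HBA resources. */
            return 0;
    }

    static struct fc_function_template my_fc_template = {
            .vport_create   = my_vport_create,
            .vport_delete   = my_vport_delete,
            /* remaining template fields (attribute flags, private sizes)
             * elided in this sketch */
    };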