Lines Matching refs:ndev
128 struct mlx5_vdpa_net *ndev; member
181 static void free_resources(struct mlx5_vdpa_net *ndev);
182 static void init_mvqs(struct mlx5_vdpa_net *ndev);
184 static void teardown_driver(struct mlx5_vdpa_net *ndev);
304 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
306 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
312 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
313 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
320 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
322 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
328 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
335 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
336 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
348 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
352 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
353 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
356 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
358 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
396 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
404 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
418 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
420 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
430 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
432 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
434 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
437 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
439 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
442 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
445 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
454 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
458 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
470 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
474 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
484 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
494 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
497 rq_buf_free(ndev, vqp); in qp_create()
502 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
508 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
509 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
510 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
512 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
513 rq_buf_free(ndev, vqp); in qp_destroy()
536 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions() local
539 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
554 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
555 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
578 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
580 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
581 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
582 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
600 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
614 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
630 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
649 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
651 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
655 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
657 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
658 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
662 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
665 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
666 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
669 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
672 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
696 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
698 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
701 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
711 set_umem_size(ndev, mvq, num, &umem); in create_umem()
712 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
725 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
733 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
735 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
747 umem_frag_buf_free(ndev, umem); in create_umem()
751 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
771 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
774 umem_frag_buf_free(ndev, umem); in umem_destroy()
777 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
783 err = create_umem(ndev, mvq, num); in umems_create()
791 umem_destroy(ndev, mvq, num); in umems_create()
796 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
801 umem_destroy(ndev, mvq, num); in umems_destroy()
804 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
808 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
838 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
848 err = umems_create(ndev, mvq); in create_virtqueue()
862 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
868 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
870 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
873 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
880 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
884 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); in create_virtqueue()
891 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
892 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
895 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
908 umems_destroy(ndev, mvq); in create_virtqueue()
912 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
920 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
923 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
924 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
928 umems_destroy(ndev, mvq); in destroy_virtqueue()
941 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
957 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
969 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
986 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1004 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1036 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1044 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1048 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1053 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1057 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1061 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1065 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1069 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1073 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1077 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1081 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
1090 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1109 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1110 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1141 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1165 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1171 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1179 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1186 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1193 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1195 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1204 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1209 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1214 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1216 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1217 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1220 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1231 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1235 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1239 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1243 err = connect_qps(ndev, mvq); in setup_vq()
1247 err = counter_set_alloc(ndev, mvq); in setup_vq()
1251 err = create_virtqueue(ndev, mvq); in setup_vq()
1256 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1258 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1268 counter_set_dealloc(ndev, mvq); in setup_vq()
1270 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1272 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1274 cq_destroy(ndev, idx); in setup_vq()
1278 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1288 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1289 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1291 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1292 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1299 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1303 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1304 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1307 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1312 suspend_vq(ndev, mvq); in teardown_vq()
1313 destroy_virtqueue(ndev, mvq); in teardown_vq()
1314 counter_set_dealloc(ndev, mvq); in teardown_vq()
1315 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1316 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1317 cq_destroy(ndev, mvq->index); in teardown_vq()
1321 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1323 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1324 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1337 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1344 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1347 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1357 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) in modify_rqt() argument
1372 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1379 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1382 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1390 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1392 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1395 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1415 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1429 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1430 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1432 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1437 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1439 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1445 static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, in mlx5_vdpa_add_mac_vlan_rules() argument
1471 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1481 dest.tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1482 rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1); in mlx5_vdpa_add_mac_vlan_rules()
1492 rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1); in mlx5_vdpa_add_mac_vlan_rules()
1507 static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev, in mlx5_vdpa_del_mac_vlan_rules() argument
1533 static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value) in mac_vlan_lookup() argument
1539 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
1546 static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) in mac_vlan_add() argument
1554 if (mac_vlan_lookup(ndev, val)) in mac_vlan_add()
1561 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, vlan, tagged, in mac_vlan_add()
1568 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
1576 static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) in mac_vlan_del() argument
1580 ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged)); in mac_vlan_del()
1585 mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule); in mac_vlan_del()
1589 static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev) in clear_mac_vlan_table() argument
1596 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
1598 mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule); in clear_mac_vlan_table()
1604 static int setup_steering(struct mlx5_vdpa_net *ndev) in setup_steering() argument
1613 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1615 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1619 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
1620 if (IS_ERR(ndev->rxft)) { in setup_steering()
1621 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1622 return PTR_ERR(ndev->rxft); in setup_steering()
1625 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
1632 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
1636 static void teardown_steering(struct mlx5_vdpa_net *ndev) in teardown_steering() argument
1638 clear_mac_vlan_table(ndev); in teardown_steering()
1639 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
1644 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac() local
1658 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
1666 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
1667 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1669 ndev->config.mac); in handle_ctrl_mac()
1683 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
1685 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
1689 mac_vlan_del(ndev, mac_back, 0, false); in handle_ctrl_mac()
1691 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
1703 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1705 ndev->config.mac); in handle_ctrl_mac()
1713 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
1715 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
1733 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps() local
1734 int cur_qps = ndev->cur_num_vqs / 2; in change_num_qps()
1739 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1743 for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) in change_num_qps()
1744 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1746 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1748 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1750 err = setup_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1754 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1762 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1764 ndev->cur_num_vqs = 2 * cur_qps; in change_num_qps()
1771 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq() local
1798 newqps > ndev->rqt_size) in handle_ctrl_mq()
1801 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
1819 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan() local
1826 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
1836 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
1847 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
1864 struct mlx5_vdpa_net *ndev; in mlx5_cvq_kick_handler() local
1870 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
1873 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
1878 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
1926 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
1932 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
1942 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
1946 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1950 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1957 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
1970 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1980 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
1986 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1993 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
1995 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2024 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
2039 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2041 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2043 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2057 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
2065 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2072 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
2083 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2097 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
2110 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2124 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
2193 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features() local
2195 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2196 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2223 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues() local
2228 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2237 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2242 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
2247 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2248 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2252 teardown_vq(ndev, mvq); in teardown_virtqueues()
2277 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features() local
2286 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2287 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2288 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2290 ndev->rqt_size = 1; in mlx5_vdpa_set_driver_features()
2292 ndev->cur_num_vqs = 2 * ndev->rqt_size; in mlx5_vdpa_set_driver_features()
2301 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb() local
2303 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2325 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
2327 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2328 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2331 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2338 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2354 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
2358 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2359 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
2360 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
2365 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
2369 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2370 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
2373 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
2379 mlx5_clear_vqs(ndev); in restore_channels_info()
2380 init_mvqs(ndev); in restore_channels_info()
2381 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2382 mvq = &ndev->vqs[i]; in restore_channels_info()
2400 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map() local
2403 suspend_vqs(ndev); in mlx5_vdpa_change_map()
2404 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
2408 teardown_driver(ndev); in mlx5_vdpa_change_map()
2417 restore_channels_info(ndev); in mlx5_vdpa_change_map()
2433 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver() local
2436 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_driver()
2438 if (ndev->setup) { in setup_driver()
2449 err = create_rqt(ndev); in setup_driver()
2455 err = create_tir(ndev); in setup_driver()
2461 err = setup_steering(ndev); in setup_driver()
2466 ndev->setup = true; in setup_driver()
2471 destroy_tir(ndev); in setup_driver()
2473 destroy_rqt(ndev); in setup_driver()
2475 teardown_virtqueues(ndev); in setup_driver()
2481 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
2484 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_driver()
2486 if (!ndev->setup) in teardown_driver()
2489 teardown_steering(ndev); in teardown_driver()
2490 destroy_tir(ndev); in teardown_driver()
2491 destroy_rqt(ndev); in teardown_driver()
2492 teardown_virtqueues(ndev); in teardown_driver()
2493 ndev->setup = false; in teardown_driver()
2496 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
2500 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2501 ndev->vqs[i].ready = false; in clear_vqs_ready()
2503 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2529 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
2534 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
2536 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2554 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2555 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2559 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2560 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2562 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2577 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset() local
2582 down_write(&ndev->reslock); in mlx5_vdpa_reset()
2583 teardown_driver(ndev); in mlx5_vdpa_reset()
2584 clear_vqs_ready(ndev); in mlx5_vdpa_reset()
2585 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2586 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2587 ndev->mvdev.suspended = false; in mlx5_vdpa_reset()
2588 ndev->cur_num_vqs = 0; in mlx5_vdpa_reset()
2589 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2590 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2591 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2592 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2600 up_write(&ndev->reslock); in mlx5_vdpa_reset()
2614 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
2617 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
2655 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
2658 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
2660 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
2668 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
2670 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2672 free_resources(ndev); in mlx5_vdpa_free()
2674 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
2676 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
2678 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
2679 kfree(ndev->event_cbs); in mlx5_vdpa_free()
2680 kfree(ndev->vqs); in mlx5_vdpa_free()
2687 struct mlx5_vdpa_net *ndev; in mlx5_get_vq_notification() local
2700 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
2701 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
2719 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
2728 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
2738 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
2741 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
2756 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats() local
2763 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
2777 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
2778 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
2802 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
2820 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend() local
2826 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
2827 ndev->nb_registered = false; in mlx5_vdpa_suspend()
2828 mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); in mlx5_vdpa_suspend()
2829 flush_workqueue(ndev->mvdev.wq); in mlx5_vdpa_suspend()
2830 for (i = 0; i < ndev->cur_num_vqs; i++) { in mlx5_vdpa_suspend()
2831 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
2832 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
2836 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
2899 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
2901 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
2905 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
2909 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
2913 err = create_tis(ndev); in alloc_resources()
2922 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
2926 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
2928 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
2933 destroy_tis(ndev); in free_resources()
2934 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
2938 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
2943 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
2944 mvq = &ndev->vqs[i]; in init_mvqs()
2947 mvq->ndev = ndev; in init_mvqs()
2951 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
2952 mvq = &ndev->vqs[i]; in init_mvqs()
2955 mvq->ndev = ndev; in init_mvqs()
2962 struct mlx5_vdpa_net *ndev; member
2997 struct mlx5_vdpa_net *ndev; in update_carrier() local
3001 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
3003 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
3005 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
3007 if (ndev->nb_registered && ndev->config_cb.callback) in update_carrier()
3008 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
3015 struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb); in event_handler() local
3028 wqent->mvdev = &ndev->mvdev; in event_handler()
3030 queue_work(ndev->mvdev.wq, &wqent->work); in event_handler()
3070 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_dev_add() local
3076 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3103 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
3105 if (IS_ERR(ndev)) in mlx5_vdpa_dev_add()
3106 return PTR_ERR(ndev); in mlx5_vdpa_dev_add()
3108 ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features; in mlx5_vdpa_dev_add()
3109 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3110 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3113 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3114 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3115 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3120 init_mvqs(ndev); in mlx5_vdpa_dev_add()
3121 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3122 config = &ndev->config; in mlx5_vdpa_dev_add()
3134 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3137 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3139 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3142 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3155 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC); in mlx5_vdpa_dev_add()
3160 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3170 err = alloc_resources(ndev); in mlx5_vdpa_dev_add()
3174 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3175 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3182 ndev->nb.notifier_call = event_handler; in mlx5_vdpa_dev_add()
3183 mlx5_notifier_register(mdev, &ndev->nb); in mlx5_vdpa_dev_add()
3184 ndev->nb_registered = true; in mlx5_vdpa_dev_add()
3190 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3196 free_resources(ndev); in mlx5_vdpa_dev_add()
3200 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3213 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del() local
3216 if (ndev->nb_registered) { in mlx5_vdpa_dev_del()
3217 ndev->nb_registered = false; in mlx5_vdpa_dev_del()
3218 mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); in mlx5_vdpa_dev_del()
3224 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()