Lines Matching full:ndev

Each entry below gives the line number of the hit in the source file, the matching source line, and either the enclosing function or how the identifier is used at that point (struct member, function argument, or local variable).

118 	struct mlx5_vdpa_net *ndev;  member
140 static void free_resources(struct mlx5_vdpa_net *ndev);
141 static void init_mvqs(struct mlx5_vdpa_net *ndev);
143 static void teardown_driver(struct mlx5_vdpa_net *ndev);
263 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
265 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
271 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
272 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
279 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
281 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
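The create_tis()/destroy_tis() hits above cover the TIS helpers almost verbatim. For orientation, a minimal hedged sketch of how create_tis() plausibly reads in full, reconstructed from the matched fragments; the context field name and the warning text are assumptions, not quotes from the file:

```c
/* Hedged reconstruction of create_tis(): fill the TIS context with the
 * transport domain allocated in alloc_resources() and hand it to the
 * mlx5_vdpa helper that issues the firmware command. */
static int create_tis(struct mlx5_vdpa_net *ndev)
{
	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;
	int err;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);	/* "ctx" field name is an assumption */
	MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
	err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
	if (err)
		mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);

	return err;
}
```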
287 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
294 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
295 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
307 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
311 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
312 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
315 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
317 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
355 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
363 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
377 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
379 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
389 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
391 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
393 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
396 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
398 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
401 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
404 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
413 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
417 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
429 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
433 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
443 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
453 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
456 rq_buf_free(ndev, vqp); in qp_create()
461 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
467 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
468 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
469 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
471 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
472 rq_buf_free(ndev, vqp); in qp_destroy()
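qp_destroy() is likewise nearly complete in the hits. A hedged sketch of the destroy path; the !vqp->fw guard around the buffer release is an assumption (that line contains no `ndev` and so would not appear in this search):

```c
/* Hedged sketch of qp_destroy(): issue DESTROY_QP under the vDPA uid, then
 * release the doorbell and receive buffer.  Only the driver-owned QP is
 * assumed to have host-allocated buffers, hence the !vqp->fw check. */
static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
	MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
	if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
		mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
	if (!vqp->fw) {
		mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
		rq_buf_free(ndev, vqp);
	}
}
```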
495 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions() local
498 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
513 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
514 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
537 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
539 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
540 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
541 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
559 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
573 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
589 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
608 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
610 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
614 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
616 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
617 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
621 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
624 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
625 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
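The CQ teardown hits follow the same pattern. A hedged sketch of cq_destroy(); the mlx5_core_destroy_cq() call and the vcq local name do not appear in the matched lines and are assumptions based on the usual mlx5 CQ teardown flow:

```c
/* Hedged sketch of cq_destroy(): destroy the completion queue in firmware,
 * then free its fragmented buffer and doorbell record. */
static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
{
	struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
	struct mlx5_vdpa_cq *vcq = &mvq->cq;

	if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
		mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
		return;
	}
	cq_frag_buf_free(ndev, &vcq->buf);
	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
```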
628 static int read_umem_params(struct mlx5_vdpa_net *ndev) in read_umem_params() argument
632 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
647 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
654 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
655 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
657 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
658 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
660 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
661 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
668 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
676 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
677 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
681 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
682 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
686 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
687 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
695 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
697 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
700 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
710 set_umem_size(ndev, mvq, num, &umem); in create_umem()
711 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
724 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
732 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
734 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
746 umem_frag_buf_free(ndev, umem); in create_umem()
750 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
770 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
773 umem_frag_buf_free(ndev, umem); in umem_destroy()
776 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
782 err = create_umem(ndev, mvq, num); in umems_create()
790 umem_destroy(ndev, mvq, num); in umems_create()
795 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
800 umem_destroy(ndev, mvq, num); in umems_destroy()
803 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
807 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
860 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
871 err = umems_create(ndev, mvq); in create_virtqueue()
881 mlx_features = get_features(ndev->mvdev.actual_features); in create_virtqueue()
886 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
896 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
899 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
912 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
916 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); in create_virtqueue()
923 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
924 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
927 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
940 umems_destroy(ndev, mvq); in create_virtqueue()
944 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
952 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
955 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
956 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
960 umems_destroy(ndev, mvq); in destroy_virtqueue()
973 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
989 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1001 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1018 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1036 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1068 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1076 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1080 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1085 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1089 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1093 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1097 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1101 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1105 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1109 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1113 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
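The connect_qps() hits spell out the QP state machine: the firmware-owned and the driver-owned QP of a virtqueue are both reset, then walked through INIT and RTR in lockstep, and finally the firmware-side QP is moved to RTS. A compact hedged sketch of that sequence; only the transition order is taken from the matched lines, and the real function's error unwinding may differ:

```c
/* Hedged sketch of connect_qps(): RST -> INIT -> RTR for both QPs,
 * then RTS for the firmware-side QP. */
static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	int err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
	if (err)
		return err;

	return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
}
```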
1122 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1141 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1142 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1173 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1197 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1203 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1211 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1218 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1225 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1227 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1236 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1241 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1246 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1248 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1249 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1262 static void alloc_vector(struct mlx5_vdpa_net *ndev, in alloc_vector() argument
1265 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1274 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1275 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1288 static void dealloc_vector(struct mlx5_vdpa_net *ndev, in dealloc_vector() argument
1291 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1302 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1313 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1317 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1321 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1325 err = connect_qps(ndev, mvq); in setup_vq()
1329 err = counter_set_alloc(ndev, mvq); in setup_vq()
1333 alloc_vector(ndev, mvq); in setup_vq()
1334 err = create_virtqueue(ndev, mvq); in setup_vq()
1339 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1341 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1351 destroy_virtqueue(ndev, mvq); in setup_vq()
1353 dealloc_vector(ndev, mvq); in setup_vq()
1354 counter_set_dealloc(ndev, mvq); in setup_vq()
1356 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1358 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1360 cq_destroy(ndev, idx); in setup_vq()
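Taken together, the setup_vq() hits outline the per-virtqueue bring-up order (CQ, the two QPs, QP connection, counter set, interrupt vector, virtqueue object, then the modify to RDY) and the matching unwind on failure. A hedged skeleton of the function; label names, the early-exit checks, and the ready/initialized bookkeeping are assumptions, while the call order comes from the matched lines:

```c
/* Hedged skeleton of setup_vq(); the real function also tracks whether the
 * VQ is initialized/ready before modifying it to RDY, which is omitted here. */
static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
	u16 idx = mvq->index;
	int err;

	err = cq_create(ndev, idx, mvq->num_ent);
	if (err)
		return err;

	err = qp_create(ndev, mvq, &mvq->fwqp);
	if (err)
		goto err_fwqp;

	err = qp_create(ndev, mvq, &mvq->vqqp);
	if (err)
		goto err_vqqp;

	err = connect_qps(ndev, mvq);
	if (err)
		goto err_connect;

	err = counter_set_alloc(ndev, mvq);
	if (err)
		goto err_connect;

	alloc_vector(ndev, mvq);
	err = create_virtqueue(ndev, mvq);
	if (err)
		goto err_vq;

	err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
	if (err) {
		mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
			       idx, err);
		goto err_modify;
	}

	return 0;

err_modify:
	destroy_virtqueue(ndev, mvq);
err_vq:
	dealloc_vector(ndev, mvq);
	counter_set_dealloc(ndev, mvq);
err_connect:
	qp_destroy(ndev, &mvq->vqqp);
err_vqqp:
	qp_destroy(ndev, &mvq->fwqp);
err_fwqp:
	cq_destroy(ndev, idx);
	return err;
}
```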
1364 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1374 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1375 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1377 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1378 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1385 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1389 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1390 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1393 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1398 suspend_vq(ndev, mvq); in teardown_vq()
1399 destroy_virtqueue(ndev, mvq); in teardown_vq()
1400 dealloc_vector(ndev, mvq); in teardown_vq()
1401 counter_set_dealloc(ndev, mvq); in teardown_vq()
1402 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1403 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1404 cq_destroy(ndev, mvq->index); in teardown_vq()
1408 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1410 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1411 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1424 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1431 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1434 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
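In create_rqt() the indirection table is sized to the next power of two of rqt_size, and the entry list is filled by cycling over the currently enabled virtqueues: stepping j by two picks the receive queue of each RX/TX pair, wrapping with the modulo. A hedged sketch of just that loop (loop-variable and list names beyond those in the matched lines are assumptions):

```c
/* Core of the RQT population loop in create_rqt(): act_sz entries are
 * written, each pointing at an RX virtqueue (even index of a pair),
 * repeating queues as needed to fill the table. */
for (i = 0, j = 0; i < act_sz; i++, j += 2)
	list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
```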
1444 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) in modify_rqt() argument
1459 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1466 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1469 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1477 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1479 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1482 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1502 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1516 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1517 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1519 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1524 mlx5_vdpa_add_tirn(ndev); in create_tir()
1528 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1530 mlx5_vdpa_remove_tirn(ndev); in destroy_tir()
1531 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1543 static int add_steering_counters(struct mlx5_vdpa_net *ndev, in add_steering_counters() argument
1551 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1555 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1566 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1573 static void remove_steering_counters(struct mlx5_vdpa_net *ndev, in remove_steering_counters() argument
1577 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1578 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1582 static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, in mlx5_vdpa_add_mac_vlan_rules() argument
1607 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1617 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1618 err = add_steering_counters(ndev, node, &flow_act, dests); in mlx5_vdpa_add_mac_vlan_rules()
1625 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1639 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1645 mlx5_vdpa_add_rx_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1651 remove_steering_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1657 static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev, in mlx5_vdpa_del_mac_vlan_rules() argument
1660 mlx5_vdpa_remove_rx_counters(ndev, node); in mlx5_vdpa_del_mac_vlan_rules()
1683 static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value) in mac_vlan_lookup() argument
1689 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
1696 static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vid, bool tagged) in mac_vlan_add() argument
1704 if (mac_vlan_lookup(ndev, val)) in mac_vlan_add()
1713 ptr->ndev = ndev; in mac_vlan_add()
1714 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
1719 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
1727 static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) in mac_vlan_del() argument
1731 ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged)); in mac_vlan_del()
1736 mlx5_vdpa_del_mac_vlan_rules(ndev, ptr); in mac_vlan_del()
1737 remove_steering_counters(ndev, ptr); in mac_vlan_del()
1741 static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev) in clear_mac_vlan_table() argument
1748 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
1750 mlx5_vdpa_del_mac_vlan_rules(ndev, pos); in clear_mac_vlan_table()
1751 remove_steering_counters(ndev, pos); in clear_mac_vlan_table()
1757 static int setup_steering(struct mlx5_vdpa_net *ndev) in setup_steering() argument
1766 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1768 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1772 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
1773 if (IS_ERR(ndev->rxft)) { in setup_steering()
1774 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1775 return PTR_ERR(ndev->rxft); in setup_steering()
1777 mlx5_vdpa_add_rx_flow_table(ndev); in setup_steering()
1779 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
1786 mlx5_vdpa_remove_rx_flow_table(ndev); in setup_steering()
1787 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
1791 static void teardown_steering(struct mlx5_vdpa_net *ndev) in teardown_steering() argument
1793 clear_mac_vlan_table(ndev); in teardown_steering()
1794 mlx5_vdpa_remove_rx_flow_table(ndev); in teardown_steering()
1795 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
1800 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac() local
1814 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
1822 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
1823 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1825 ndev->config.mac); in handle_ctrl_mac()
1839 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
1841 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
1845 mac_vlan_del(ndev, mac_back, 0, false); in handle_ctrl_mac()
1847 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
1859 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1861 ndev->config.mac); in handle_ctrl_mac()
1869 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
1871 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
1889 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps() local
1890 int cur_qps = ndev->cur_num_vqs / 2; in change_num_qps()
1895 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1899 for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) in change_num_qps()
1900 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1902 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1904 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1906 err = setup_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1910 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1918 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1920 ndev->cur_num_vqs = 2 * cur_qps; in change_num_qps()
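The change_num_qps() hits show the ordering rule for multi-queue reconfiguration: when shrinking, the RQT is narrowed first so traffic stops landing on the queues about to be torn down; when growing, the new virtqueues are brought up first and the RQT is widened last. A hedged reconstruction of the function from the matched lines; the error-label name is an assumption:

```c
/* Hedged reconstruction of change_num_qps(): shrink narrows the RQT and then
 * tears down the surplus VQs; grow sets up the new VQs and then widens the
 * RQT, undoing the added VQs on failure. */
static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
{
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int cur_qps = ndev->cur_num_vqs / 2;
	int err;
	int i;

	if (cur_qps > newqps) {
		err = modify_rqt(ndev, 2 * newqps);
		if (err)
			return err;

		for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
			teardown_vq(ndev, &ndev->vqs[i]);

		ndev->cur_num_vqs = 2 * newqps;
	} else {
		ndev->cur_num_vqs = 2 * newqps;
		for (i = cur_qps * 2; i < 2 * newqps; i++) {
			err = setup_vq(ndev, &ndev->vqs[i]);
			if (err)
				goto clean_added;
		}
		err = modify_rqt(ndev, 2 * newqps);
		if (err)
			goto clean_added;
	}
	return 0;

clean_added:
	for (--i; i >= 2 * cur_qps; --i)
		teardown_vq(ndev, &ndev->vqs[i]);

	ndev->cur_num_vqs = 2 * cur_qps;

	return err;
}
```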
1927 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq() local
1954 newqps > ndev->rqt_size) in handle_ctrl_mq()
1957 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
1975 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan() local
1982 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
1992 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2003 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2020 struct mlx5_vdpa_net *ndev; in mlx5_cvq_kick_handler() local
2026 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
2029 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2034 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2082 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2088 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
2098 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2102 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2106 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
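mlx5_vdpa_kick_vq() shows the two kick paths: the control virtqueue is serviced in process context through a workqueue, while data virtqueues are kicked by writing the queue index to the device's doorbell page. A hedged sketch; the index-validation and control-VQ-detection helpers named here are assumptions standing in for whatever checks the real function performs:

```c
/* Hedged sketch of mlx5_vdpa_kick_vq(): defer CVQ processing to the CVQ kick
 * handler work item, kick data VQs through the doorbell. */
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	struct mlx5_vdpa_virtqueue *mvq;

	if (!is_index_valid(mvdev, idx))
		return;

	if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
		/* control commands are parsed later, in process context */
		queue_work(mvdev->wq, &ndev->cvq_ent.work);
		return;
	}

	mvq = &ndev->vqs[idx];
	if (unlikely(!mvq->ready))
		return;

	iowrite16(idx, ndev->mvdev.res.kick_addr);
}
```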
2113 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
2126 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2136 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
2142 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2149 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
2151 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2180 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
2195 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2197 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2199 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2213 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
2221 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2228 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
2239 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2253 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
2266 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2280 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
2354 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features() local
2356 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2357 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2384 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues() local
2389 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2398 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2403 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
2408 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2409 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2413 teardown_vq(ndev, mvq); in teardown_virtqueues()
2467 struct mlx5_vdpa_net *ndev; in update_carrier() local
2471 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
2473 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2475 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2477 if (ndev->config_cb.callback) in update_carrier()
2478 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2483 static int queue_link_work(struct mlx5_vdpa_net *ndev) in queue_link_work() argument
2491 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2493 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2499 struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb); in event_handler() local
2507 if (queue_link_work(ndev)) in event_handler()
2520 static void register_link_notifier(struct mlx5_vdpa_net *ndev) in register_link_notifier() argument
2522 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2525 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2526 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2527 ndev->nb_registered = true; in register_link_notifier()
2528 queue_link_work(ndev); in register_link_notifier()
2531 static void unregister_link_notifier(struct mlx5_vdpa_net *ndev) in unregister_link_notifier() argument
2533 if (!ndev->nb_registered) in unregister_link_notifier()
2536 ndev->nb_registered = false; in unregister_link_notifier()
2537 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2538 if (ndev->mvdev.wq) in unregister_link_notifier()
2539 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2545 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features() local
2554 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2555 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2556 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2558 ndev->rqt_size = 1; in mlx5_vdpa_set_driver_features()
2568 ndev->cur_num_vqs = 2; in mlx5_vdpa_set_driver_features()
2577 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb() local
2579 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2601 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
2603 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2604 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2607 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2614 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2631 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
2635 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2636 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
2637 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
2642 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
2646 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2647 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
2650 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
2656 mlx5_clear_vqs(ndev); in restore_channels_info()
2657 init_mvqs(ndev); in restore_channels_info()
2658 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2659 mvq = &ndev->vqs[i]; in restore_channels_info()
2678 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map() local
2681 suspend_vqs(ndev); in mlx5_vdpa_change_map()
2682 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
2686 teardown_driver(ndev); in mlx5_vdpa_change_map()
2695 restore_channels_info(ndev); in mlx5_vdpa_change_map()
2711 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver() local
2714 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_driver()
2716 if (ndev->setup) { in setup_driver()
2721 mlx5_vdpa_add_debugfs(ndev); in setup_driver()
2723 err = read_umem_params(ndev); in setup_driver()
2733 err = create_rqt(ndev); in setup_driver()
2739 err = create_tir(ndev); in setup_driver()
2745 err = setup_steering(ndev); in setup_driver()
2750 ndev->setup = true; in setup_driver()
2755 destroy_tir(ndev); in setup_driver()
2757 destroy_rqt(ndev); in setup_driver()
2759 teardown_virtqueues(ndev); in setup_driver()
2761 mlx5_vdpa_remove_debugfs(ndev); in setup_driver()
2767 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
2770 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_driver()
2772 if (!ndev->setup) in teardown_driver()
2775 mlx5_vdpa_remove_debugfs(ndev); in teardown_driver()
2776 teardown_steering(ndev); in teardown_driver()
2777 destroy_tir(ndev); in teardown_driver()
2778 destroy_rqt(ndev); in teardown_driver()
2779 teardown_virtqueues(ndev); in teardown_driver()
2780 ndev->setup = false; in teardown_driver()
2783 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
2787 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2788 ndev->vqs[i].ready = false; in clear_vqs_ready()
2790 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2816 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
2821 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
2823 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2830 register_link_notifier(ndev); in mlx5_vdpa_set_status()
2842 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2843 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2847 unregister_link_notifier(ndev); in mlx5_vdpa_set_status()
2849 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2850 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2852 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2867 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset() local
2872 down_write(&ndev->reslock); in mlx5_vdpa_reset()
2873 unregister_link_notifier(ndev); in mlx5_vdpa_reset()
2874 teardown_driver(ndev); in mlx5_vdpa_reset()
2875 clear_vqs_ready(ndev); in mlx5_vdpa_reset()
2876 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2877 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2878 ndev->mvdev.suspended = false; in mlx5_vdpa_reset()
2879 ndev->cur_num_vqs = 0; in mlx5_vdpa_reset()
2880 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2881 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2882 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2883 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2891 up_write(&ndev->reslock); in mlx5_vdpa_reset()
2905 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
2908 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
2946 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
2949 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
2951 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
2965 static void free_irqs(struct mlx5_vdpa_net *ndev) in free_irqs() argument
2970 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
2973 if (!ndev->irqp.entries) in free_irqs()
2976 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
2977 ent = ndev->irqp.entries + i; in free_irqs()
2979 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
2981 kfree(ndev->irqp.entries); in free_irqs()
2988 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
2990 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2992 free_resources(ndev); in mlx5_vdpa_free()
2994 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
2996 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
2998 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
2999 free_irqs(ndev); in mlx5_vdpa_free()
3000 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3001 kfree(ndev->vqs); in mlx5_vdpa_free()
3008 struct mlx5_vdpa_net *ndev; in mlx5_get_vq_notification() local
3021 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
3022 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3031 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_irq() local
3040 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3054 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
3063 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3073 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3076 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3091 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats() local
3098 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3112 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3113 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
3137 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3155 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend() local
3161 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3162 unregister_link_notifier(ndev); in mlx5_vdpa_suspend()
3163 for (i = 0; i < ndev->cur_num_vqs; i++) { in mlx5_vdpa_suspend()
3164 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
3165 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
3169 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3233 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
3235 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
3239 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
3243 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
3247 err = create_tis(ndev); in alloc_resources()
3256 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
3260 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
3262 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
3267 destroy_tis(ndev); in free_resources()
3268 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
3272 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
3277 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
3278 mvq = &ndev->vqs[i]; in init_mvqs()
3281 mvq->ndev = ndev; in init_mvqs()
3285 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
3286 mvq = &ndev->vqs[i]; in init_mvqs()
3289 mvq->ndev = ndev; in init_mvqs()
3296 struct mlx5_vdpa_net *ndev; member
3321 static void allocate_irqs(struct mlx5_vdpa_net *ndev) in allocate_irqs() argument
3326 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3329 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3332 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3333 if (!ndev->irqp.entries) in allocate_irqs()
3337 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3338 ent = ndev->irqp.entries + i; in allocate_irqs()
3340 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3341 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3345 ndev->irqp.num_ent++; in allocate_irqs()
3356 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_dev_add() local
3363 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3410 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
3412 if (IS_ERR(ndev)) in mlx5_vdpa_dev_add()
3413 return PTR_ERR(ndev); in mlx5_vdpa_dev_add()
3415 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3416 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3419 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3420 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3421 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3426 init_mvqs(ndev); in mlx5_vdpa_dev_add()
3427 allocate_irqs(ndev); in mlx5_vdpa_dev_add()
3428 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3429 config = &ndev->config; in mlx5_vdpa_dev_add()
3442 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3447 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3449 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3453 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3477 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3486 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3488 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3498 err = alloc_resources(ndev); in mlx5_vdpa_dev_add()
3502 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3503 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3515 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3521 free_resources(ndev); in mlx5_vdpa_dev_add()
3525 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3538 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del() local
3541 unregister_link_notifier(ndev); in mlx5_vdpa_dev_del()
3546 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()