Lines Matching refs:ndev

118 	struct mlx5_vdpa_net *ndev;  member
140 static void free_resources(struct mlx5_vdpa_net *ndev);
141 static void init_mvqs(struct mlx5_vdpa_net *ndev);
143 static void teardown_driver(struct mlx5_vdpa_net *ndev);
261 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
263 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
269 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
270 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
277 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
279 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
285 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
292 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
293 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
305 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
309 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
310 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
313 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
315 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
353 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
361 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
375 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
377 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
387 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
389 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
391 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
394 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
396 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
399 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
402 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
411 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
415 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
427 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
431 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
441 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
451 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
454 rq_buf_free(ndev, vqp); in qp_create()
459 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
465 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
466 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
467 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
469 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
470 rq_buf_free(ndev, vqp); in qp_destroy()
493 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions() local
496 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
511 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
512 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
535 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
537 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
538 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
539 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
557 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
571 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
587 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
606 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
608 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
612 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
614 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
615 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
619 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
622 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
623 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
626 static int read_umem_params(struct mlx5_vdpa_net *ndev) in read_umem_params() argument
630 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
645 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
652 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
653 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
655 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
656 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
658 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
659 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
666 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
674 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
675 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
679 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
680 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
684 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
685 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
693 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
695 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
698 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
708 set_umem_size(ndev, mvq, num, &umem); in create_umem()
709 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
722 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
730 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
732 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
744 umem_frag_buf_free(ndev, umem); in create_umem()
748 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
768 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
771 umem_frag_buf_free(ndev, umem); in umem_destroy()
774 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
780 err = create_umem(ndev, mvq, num); in umems_create()
788 umem_destroy(ndev, mvq, num); in umems_create()
793 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
798 umem_destroy(ndev, mvq, num); in umems_destroy()
801 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
805 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
858 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
869 err = umems_create(ndev, mvq); in create_virtqueue()
879 mlx_features = get_features(ndev->mvdev.actual_features); in create_virtqueue()
884 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
894 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
897 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
910 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
914 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey); in create_virtqueue()
921 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
922 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
925 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
938 umems_destroy(ndev, mvq); in create_virtqueue()
942 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
950 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
953 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
954 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
958 umems_destroy(ndev, mvq); in destroy_virtqueue()
971 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
987 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
999 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1016 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1034 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1066 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1074 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1078 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1083 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1087 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1091 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1095 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1099 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1103 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1107 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1111 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
1120 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1139 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1140 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1171 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1195 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1201 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1209 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1216 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1223 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1225 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1234 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1239 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1244 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1246 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1247 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1260 static void alloc_vector(struct mlx5_vdpa_net *ndev, in alloc_vector() argument
1263 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1272 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1273 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1286 static void dealloc_vector(struct mlx5_vdpa_net *ndev, in dealloc_vector() argument
1289 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1300 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1311 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1315 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1319 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1323 err = connect_qps(ndev, mvq); in setup_vq()
1327 err = counter_set_alloc(ndev, mvq); in setup_vq()
1331 alloc_vector(ndev, mvq); in setup_vq()
1332 err = create_virtqueue(ndev, mvq); in setup_vq()
1337 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1339 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1349 destroy_virtqueue(ndev, mvq); in setup_vq()
1351 dealloc_vector(ndev, mvq); in setup_vq()
1352 counter_set_dealloc(ndev, mvq); in setup_vq()
1354 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1356 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1358 cq_destroy(ndev, idx); in setup_vq()
1362 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1372 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1373 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1375 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1376 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1383 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1387 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1388 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1391 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1396 suspend_vq(ndev, mvq); in teardown_vq()
1397 destroy_virtqueue(ndev, mvq); in teardown_vq()
1398 dealloc_vector(ndev, mvq); in teardown_vq()
1399 counter_set_dealloc(ndev, mvq); in teardown_vq()
1400 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1401 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1402 cq_destroy(ndev, mvq->index); in teardown_vq()
1406 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1408 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1409 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1422 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1429 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1432 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1442 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) in modify_rqt() argument
1457 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1464 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1467 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1475 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1477 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1480 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1500 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1514 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1515 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1517 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1522 mlx5_vdpa_add_tirn(ndev); in create_tir()
1526 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1528 mlx5_vdpa_remove_tirn(ndev); in destroy_tir()
1529 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1541 static int add_steering_counters(struct mlx5_vdpa_net *ndev, in add_steering_counters() argument
1549 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1553 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1564 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1571 static void remove_steering_counters(struct mlx5_vdpa_net *ndev, in remove_steering_counters() argument
1575 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1576 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1580 static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, in mlx5_vdpa_add_mac_vlan_rules() argument
1605 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1615 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1616 err = add_steering_counters(ndev, node, &flow_act, dests); in mlx5_vdpa_add_mac_vlan_rules()
1623 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1637 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1643 mlx5_vdpa_add_rx_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1649 remove_steering_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1655 static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev, in mlx5_vdpa_del_mac_vlan_rules() argument
1658 mlx5_vdpa_remove_rx_counters(ndev, node); in mlx5_vdpa_del_mac_vlan_rules()
1681 static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value) in mac_vlan_lookup() argument
1687 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
1694 static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vid, bool tagged) in mac_vlan_add() argument
1702 if (mac_vlan_lookup(ndev, val)) in mac_vlan_add()
1711 ptr->ndev = ndev; in mac_vlan_add()
1712 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
1717 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
1725 static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) in mac_vlan_del() argument
1729 ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged)); in mac_vlan_del()
1734 mlx5_vdpa_del_mac_vlan_rules(ndev, ptr); in mac_vlan_del()
1735 remove_steering_counters(ndev, ptr); in mac_vlan_del()
1739 static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev) in clear_mac_vlan_table() argument
1746 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
1748 mlx5_vdpa_del_mac_vlan_rules(ndev, pos); in clear_mac_vlan_table()
1749 remove_steering_counters(ndev, pos); in clear_mac_vlan_table()
1755 static int setup_steering(struct mlx5_vdpa_net *ndev) in setup_steering() argument
1764 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1766 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1770 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
1771 if (IS_ERR(ndev->rxft)) { in setup_steering()
1772 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1773 return PTR_ERR(ndev->rxft); in setup_steering()
1775 mlx5_vdpa_add_rx_flow_table(ndev); in setup_steering()
1777 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
1784 mlx5_vdpa_remove_rx_flow_table(ndev); in setup_steering()
1785 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
1789 static void teardown_steering(struct mlx5_vdpa_net *ndev) in teardown_steering() argument
1791 clear_mac_vlan_table(ndev); in teardown_steering()
1792 mlx5_vdpa_remove_rx_flow_table(ndev); in teardown_steering()
1793 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
1798 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac() local
1812 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
1820 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
1821 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1823 ndev->config.mac); in handle_ctrl_mac()
1837 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
1839 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
1843 mac_vlan_del(ndev, mac_back, 0, false); in handle_ctrl_mac()
1845 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
1857 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1859 ndev->config.mac); in handle_ctrl_mac()
1867 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
1869 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
1887 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps() local
1888 int cur_qps = ndev->cur_num_vqs / 2; in change_num_qps()
1893 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1897 for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) in change_num_qps()
1898 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1900 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1902 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
1904 err = setup_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1908 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
1916 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
1918 ndev->cur_num_vqs = 2 * cur_qps; in change_num_qps()
1925 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq() local
1952 newqps > ndev->rqt_size) in handle_ctrl_mq()
1955 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
1973 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan() local
1980 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
1990 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2001 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2018 struct mlx5_vdpa_net *ndev; in mlx5_cvq_kick_handler() local
2024 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
2027 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2032 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2080 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2086 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
2096 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2100 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2104 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2111 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
2124 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2134 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
2147 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2154 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
2156 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2185 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
2200 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2202 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2204 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2218 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
2226 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2233 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
2244 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2258 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
2271 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2285 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
2359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features() local
2361 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2362 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2389 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues() local
2394 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2403 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2408 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
2413 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2414 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2418 teardown_vq(ndev, mvq); in teardown_virtqueues()
2472 struct mlx5_vdpa_net *ndev; in update_carrier() local
2476 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
2478 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2480 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2482 if (ndev->config_cb.callback) in update_carrier()
2483 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2488 static int queue_link_work(struct mlx5_vdpa_net *ndev) in queue_link_work() argument
2496 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2498 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2504 struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb); in event_handler() local
2512 if (queue_link_work(ndev)) in event_handler()
2525 static void register_link_notifier(struct mlx5_vdpa_net *ndev) in register_link_notifier() argument
2527 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2530 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2531 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2532 ndev->nb_registered = true; in register_link_notifier()
2533 queue_link_work(ndev); in register_link_notifier()
2536 static void unregister_link_notifier(struct mlx5_vdpa_net *ndev) in unregister_link_notifier() argument
2538 if (!ndev->nb_registered) in unregister_link_notifier()
2541 ndev->nb_registered = false; in unregister_link_notifier()
2542 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2543 if (ndev->mvdev.wq) in unregister_link_notifier()
2544 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2550 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features() local
2559 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2560 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2561 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2563 ndev->rqt_size = 1; in mlx5_vdpa_set_driver_features()
2573 ndev->cur_num_vqs = 2; in mlx5_vdpa_set_driver_features()
2582 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb() local
2584 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2606 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
2608 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2609 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2612 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2619 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2636 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
2640 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2641 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
2642 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
2647 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
2651 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2652 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
2655 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
2661 mlx5_clear_vqs(ndev); in restore_channels_info()
2662 init_mvqs(ndev); in restore_channels_info()
2663 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2664 mvq = &ndev->vqs[i]; in restore_channels_info()
2683 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map() local
2686 suspend_vqs(ndev); in mlx5_vdpa_change_map()
2687 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
2691 teardown_driver(ndev); in mlx5_vdpa_change_map()
2700 restore_channels_info(ndev); in mlx5_vdpa_change_map()
2716 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver() local
2719 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_driver()
2721 if (ndev->setup) { in setup_driver()
2726 mlx5_vdpa_add_debugfs(ndev); in setup_driver()
2728 err = read_umem_params(ndev); in setup_driver()
2738 err = create_rqt(ndev); in setup_driver()
2744 err = create_tir(ndev); in setup_driver()
2750 err = setup_steering(ndev); in setup_driver()
2755 ndev->setup = true; in setup_driver()
2760 destroy_tir(ndev); in setup_driver()
2762 destroy_rqt(ndev); in setup_driver()
2764 teardown_virtqueues(ndev); in setup_driver()
2766 mlx5_vdpa_remove_debugfs(ndev); in setup_driver()
2772 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
2775 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_driver()
2777 if (!ndev->setup) in teardown_driver()
2780 mlx5_vdpa_remove_debugfs(ndev); in teardown_driver()
2781 teardown_steering(ndev); in teardown_driver()
2782 destroy_tir(ndev); in teardown_driver()
2783 destroy_rqt(ndev); in teardown_driver()
2784 teardown_virtqueues(ndev); in teardown_driver()
2785 ndev->setup = false; in teardown_driver()
2788 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
2792 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
2793 ndev->vqs[i].ready = false; in clear_vqs_ready()
2795 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2821 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
2826 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
2828 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2835 register_link_notifier(ndev); in mlx5_vdpa_set_status()
2847 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
2848 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2852 unregister_link_notifier(ndev); in mlx5_vdpa_set_status()
2854 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
2855 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
2857 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
2872 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset() local
2877 down_write(&ndev->reslock); in mlx5_vdpa_reset()
2878 unregister_link_notifier(ndev); in mlx5_vdpa_reset()
2879 teardown_driver(ndev); in mlx5_vdpa_reset()
2880 clear_vqs_ready(ndev); in mlx5_vdpa_reset()
2881 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_reset()
2882 ndev->mvdev.status = 0; in mlx5_vdpa_reset()
2883 ndev->mvdev.suspended = false; in mlx5_vdpa_reset()
2884 ndev->cur_num_vqs = 0; in mlx5_vdpa_reset()
2885 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_reset()
2886 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_reset()
2887 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_reset()
2888 ndev->mvdev.actual_features = 0; in mlx5_vdpa_reset()
2896 up_write(&ndev->reslock); in mlx5_vdpa_reset()
2910 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
2913 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
2951 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
2954 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
2956 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
2970 static void free_irqs(struct mlx5_vdpa_net *ndev) in free_irqs() argument
2975 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
2978 if (!ndev->irqp.entries) in free_irqs()
2981 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
2982 ent = ndev->irqp.entries + i; in free_irqs()
2984 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
2986 kfree(ndev->irqp.entries); in free_irqs()
2993 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
2995 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
2997 free_resources(ndev); in mlx5_vdpa_free()
2999 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
3001 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
3003 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3004 free_irqs(ndev); in mlx5_vdpa_free()
3005 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3006 kfree(ndev->vqs); in mlx5_vdpa_free()
3013 struct mlx5_vdpa_net *ndev; in mlx5_get_vq_notification() local
3026 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
3027 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3036 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_irq() local
3045 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3059 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
3068 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3078 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3081 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3096 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats() local
3103 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3117 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3118 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
3142 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3160 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend() local
3166 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3167 unregister_link_notifier(ndev); in mlx5_vdpa_suspend()
3168 for (i = 0; i < ndev->cur_num_vqs; i++) { in mlx5_vdpa_suspend()
3169 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
3170 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
3174 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3238 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
3240 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
3244 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
3248 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
3252 err = create_tis(ndev); in alloc_resources()
3261 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
3265 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
3267 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
3272 destroy_tis(ndev); in free_resources()
3273 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
3277 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
3282 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
3283 mvq = &ndev->vqs[i]; in init_mvqs()
3286 mvq->ndev = ndev; in init_mvqs()
3290 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
3291 mvq = &ndev->vqs[i]; in init_mvqs()
3294 mvq->ndev = ndev; in init_mvqs()
3301 struct mlx5_vdpa_net *ndev; member
3326 static void allocate_irqs(struct mlx5_vdpa_net *ndev) in allocate_irqs() argument
3331 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3334 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3337 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3338 if (!ndev->irqp.entries) in allocate_irqs()
3342 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3343 ent = ndev->irqp.entries + i; in allocate_irqs()
3345 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3346 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3350 ndev->irqp.num_ent++; in allocate_irqs()
3361 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_dev_add() local
3368 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3415 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_dev_add()
3417 if (IS_ERR(ndev)) in mlx5_vdpa_dev_add()
3418 return PTR_ERR(ndev); in mlx5_vdpa_dev_add()
3420 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3421 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3424 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3425 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3426 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3431 init_mvqs(ndev); in mlx5_vdpa_dev_add()
3432 allocate_irqs(ndev); in mlx5_vdpa_dev_add()
3433 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3434 config = &ndev->config; in mlx5_vdpa_dev_add()
3447 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3452 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3454 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3458 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3482 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3491 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3493 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3503 err = alloc_resources(ndev); in mlx5_vdpa_dev_add()
3507 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3508 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3520 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3526 free_resources(ndev); in mlx5_vdpa_dev_add()
3530 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3543 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del() local
3546 unregister_link_notifier(ndev); in mlx5_vdpa_dev_del()
3551 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()