Lines Matching full:ndev (drivers/vdpa/mlx5/net/mlx5_vnet.c)

114 	struct mlx5_vdpa_net *ndev;  member
146 static void free_resources(struct mlx5_vdpa_net *ndev);
147 static void init_mvqs(struct mlx5_vdpa_net *ndev);
148 static int setup_driver(struct mlx5_vdpa_net *ndev);
149 static void teardown_driver(struct mlx5_vdpa_net *ndev);
237 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
239 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
245 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
246 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
253 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
255 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
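Nearly every match below builds a firmware command the same way: MLX5_SET() packs a host value into a named field of a big-endian command layout, and mlx5_cmd_exec() (or a wrapper such as mlx5_vdpa_create_tis()) ships the buffer to the device. A minimal userspace sketch of the packing idea, with a hypothetical field descriptor standing in for the kernel's generated PRM offsets:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl(), ntohl() */

/* hypothetical stand-in for a generated layout: the field lives in
 * 32-bit word 'dw', at bit offset 'shift', 'mask' wide */
struct fld { unsigned int dw; unsigned int shift; uint32_t mask; };

static void set_fld(void *cmd, struct fld f, uint32_t val)
{
	uint32_t *p = (uint32_t *)cmd + f.dw;
	uint32_t host = ntohl(*p);

	host &= ~(f.mask << f.shift);
	host |= (val & f.mask) << f.shift;
	*p = htonl(host);	/* firmware layouts are big-endian */
}

int main(void)
{
	uint32_t cmd[8] = { 0 };
	/* analogous to MLX5_SET(tisc, tisc, transport_domain, tdn);
	 * the descriptor values here are made up for the sketch */
	struct fld transport_domain = { .dw = 1, .shift = 0, .mask = 0xffffff };

	set_fld(cmd, transport_domain, 0x1234);
	return 0;
}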
261 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
268 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
269 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
281 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
285 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
286 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
289 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
291 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
329 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
337 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
351 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
353 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
363 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
365 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
367 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
370 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
372 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
375 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
378 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
387 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
391 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
403 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
407 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
417 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
427 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
430 rq_buf_free(ndev, vqp); in qp_create()
435 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
441 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
442 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
443 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
445 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
446 rq_buf_free(ndev, vqp); in qp_destroy()
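qp_create() above acquires its resources in order (the receive-queue fragment buffer, then the doorbell record, then the firmware create command) and its error labels release them in reverse; qp_destroy() walks the same release path unconditionally. A compilable sketch of that unwind idiom, with stubs (hypothetical names) in place of the driver helpers:

static int rq_buf_alloc_stub(void)	{ return 0; }
static void rq_buf_free_stub(void)	{ }
static int db_alloc_stub(void)		{ return 0; }
static void db_free_stub(void)		{ }
static int fw_create_qp_stub(void)	{ return 0; }

static int qp_create_sketch(void)
{
	int err;

	err = rq_buf_alloc_stub();
	if (err)
		return err;
	err = db_alloc_stub();
	if (err)
		goto err_db;
	err = fw_create_qp_stub();
	if (err)
		goto err_fw;
	return 0;

err_fw:
	db_free_stub();	/* undo in reverse order of acquisition */
err_db:
	rq_buf_free_stub();
	return err;
}

int main(void)
{
	return qp_create_sketch();
}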
483 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
484 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
507 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
509 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
510 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
511 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
529 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
543 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
559 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
578 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
580 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
584 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
586 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
587 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
591 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
594 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
595 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
598 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
601 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
625 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
627 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
630 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
640 set_umem_size(ndev, mvq, num, &umem); in create_umem()
641 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
654 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
662 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
664 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
676 umem_frag_buf_free(ndev, umem); in create_umem()
680 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
700 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
703 umem_frag_buf_free(ndev, umem); in umem_destroy()
706 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
712 err = create_umem(ndev, mvq, num); in umems_create()
720 umem_destroy(ndev, mvq, num); in umems_create()
725 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
730 umem_destroy(ndev, mvq, num); in umems_destroy()
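A virtqueue object also needs firmware umems: umems_create() above calls create_umem() once per umem (three per queue in this driver, sized by set_umem_size() from device capabilities) and unwinds the ones already registered if a later one fails; umems_destroy() runs the same reverse loop unconditionally. The loop shape, sketched with stubbed helpers:

static int create_umem_stub(int num)
{
	(void)num;
	return 0;	/* real code allocates a frag buffer and registers it */
}

static void umem_destroy_stub(int num)
{
	(void)num;
}

static int umems_create_sketch(void)
{
	int num, err;

	for (num = 1; num <= 3; num++) {
		err = create_umem_stub(num);
		if (err)
			goto err_umem;
	}
	return 0;

err_umem:
	for (num--; num > 0; num--)
		umem_destroy_stub(num);
	return err;
}

int main(void)
{
	return umems_create_sketch();
}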
733 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
737 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
761 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
771 err = umems_create(ndev, mvq); in create_virtqueue()
785 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
791 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
793 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
796 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
803 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
807 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); in create_virtqueue()
814 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
816 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
828 umems_destroy(ndev, mvq); in create_virtqueue()
832 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
840 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
843 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
844 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
847 umems_destroy(ndev, mvq); in destroy_virtqueue()
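The virtqueue itself is a firmware "general object": create_virtqueue() and destroy_virtqueue() above, and query_virtqueue()/modify_virtqueue() below, all begin by filling a common command header with an opcode, the object type, and the uid that scopes the object to this vDPA instance. A rough sketch of what that header carries; the layout and values here are illustrative placeholders, not the PRM definitions (the real header is a packed big-endian blob accessed via MLX5_SET()/MLX5_GET()):

#include <stdint.h>
#include <stdio.h>

struct general_obj_cmd_hdr_sketch {
	uint16_t opcode;	/* create/modify/query/destroy general object */
	uint16_t obj_type;	/* e.g. the virtio net queue object type */
	uint32_t obj_id;	/* assigned at create, named thereafter */
	uint16_t uid;		/* user context owning the object */
};

int main(void)
{
	struct general_obj_cmd_hdr_sketch hdr = {
		.opcode = 0xa00,	/* placeholder value */
		.obj_type = 0x1,	/* placeholder value */
		.obj_id = 0,
		.uid = 7,
	};

	printf("opcode %#x type %#x id %u uid %u\n",
	       (unsigned int)hdr.opcode, (unsigned int)hdr.obj_type,
	       (unsigned int)hdr.obj_id, (unsigned int)hdr.uid);
	return 0;
}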
860 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
876 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
888 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
905 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
923 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
955 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
963 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
967 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
972 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
976 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
980 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
984 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
988 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
992 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
996 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1000 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
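connect_qps() above drives both QPs of a virtqueue channel through the standard RST -> INIT -> RTR -> RTS ladder, alternating between the firmware-owned QP and the virtqueue QP at each rung (the INIT2RTR command names the peer's QPN, so both sides advance together); only the firmware QP takes the final RTR2RTS step. The same sequence expressed as a step table, with a stub in place of modify_qp():

#include <stddef.h>
#include <stdbool.h>

enum { OP_2RST, OP_RST2INIT, OP_INIT2RTR, OP_RTR2RTS };

static int modify_qp_stub(bool fw, int op)
{
	(void)fw; (void)op;
	return 0;	/* real code builds and executes a firmware command */
}

static int connect_qps_sketch(void)
{
	static const struct { bool fw; int op; } steps[] = {
		{ true, OP_2RST },     { false, OP_2RST },
		{ true, OP_RST2INIT }, { false, OP_RST2INIT },
		{ true, OP_INIT2RTR }, { false, OP_INIT2RTR },
		{ true, OP_RTR2RTS },  /* the vq QP stays in RTR */
	};
	int err;

	for (size_t i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		err = modify_qp_stub(steps[i].fw, steps[i].op);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	return connect_qps_sketch();
}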
1009 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1028 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1029 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1046 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1064 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1070 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1078 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1087 mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n"); in setup_vq()
1091 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1095 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1099 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1103 err = connect_qps(ndev, mvq); in setup_vq()
1107 err = create_virtqueue(ndev, mvq); in setup_vq()
1112 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1114 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1124 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1126 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1128 cq_destroy(ndev, idx); in setup_vq()
1132 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1142 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1143 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1145 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1146 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1153 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1158 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1161 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1166 suspend_vq(ndev, mvq); in teardown_vq()
1167 destroy_virtqueue(ndev, mvq); in teardown_vq()
1168 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1169 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1170 cq_destroy(ndev, mvq->index); in teardown_vq()
1174 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1184 log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in create_rqt()
1193 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1200 for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) { in create_rqt()
1201 if (!ndev->vqs[j].initialized) in create_rqt()
1204 if (!vq_is_tx(ndev->vqs[j].index)) { in create_rqt()
1205 list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); in create_rqt()
1210 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1218 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1220 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1223 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1243 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1257 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1258 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1260 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1265 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1267 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1270 static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev) in add_fwd_to_tir() argument
1282 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in add_fwd_to_tir()
1284 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); in add_fwd_to_tir()
1288 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in add_fwd_to_tir()
1289 if (IS_ERR(ndev->rxft)) in add_fwd_to_tir()
1290 return PTR_ERR(ndev->rxft); in add_fwd_to_tir()
1292 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_fwd_to_tir()
1293 if (IS_ERR(ndev->rx_counter)) { in add_fwd_to_tir()
1294 err = PTR_ERR(ndev->rx_counter); in add_fwd_to_tir()
1300 dest[0].tir_num = ndev->res.tirn; in add_fwd_to_tir()
1302 dest[1].counter_id = mlx5_fc_id(ndev->rx_counter); in add_fwd_to_tir()
1303 ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2); in add_fwd_to_tir()
1304 if (IS_ERR(ndev->rx_rule)) { in add_fwd_to_tir()
1305 err = PTR_ERR(ndev->rx_rule); in add_fwd_to_tir()
1306 ndev->rx_rule = NULL; in add_fwd_to_tir()
1313 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in add_fwd_to_tir()
1315 mlx5_destroy_flow_table(ndev->rxft); in add_fwd_to_tir()
1319 static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev) in remove_fwd_to_tir() argument
1321 if (!ndev->rx_rule) in remove_fwd_to_tir()
1324 mlx5_del_flow_rules(ndev->rx_rule); in remove_fwd_to_tir()
1325 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in remove_fwd_to_tir()
1326 mlx5_destroy_flow_table(ndev->rxft); in remove_fwd_to_tir()
1328 ndev->rx_rule = NULL; in remove_fwd_to_tir()
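The receive side is wired RQT -> TIR -> flow rule: create_rqt() collects the virtqueue ids of the initialized receive queues (virtio-net interleaves RX and TX, so even indices are receive queues and vq_is_tx() filters out the odd ones), create_tir() spreads traffic across that table, and add_fwd_to_tir() installs a catch-all rule forwarding to the TIR plus a flow counter. A sketch of the RQT population loop with simplified types:

#include <stdbool.h>
#include <stdint.h>

static bool vq_is_tx(uint16_t idx)
{
	return idx % 2;	/* virtio-net layout: RX0, TX0, RX1, TX1, ... */
}

/* returns how many receive queues were placed in 'list' */
static int fill_rqt_sketch(const uint32_t *virtq_id, const bool *initialized,
			   int max_vqs, uint32_t *list)
{
	int i = 0;

	for (int j = 0; j < max_vqs; j++) {
		if (!initialized[j])
			continue;
		if (!vq_is_tx((uint16_t)j))
			list[i++] = virtq_id[j];	/* real code stores big-endian */
	}
	return i;
}

int main(void)
{
	uint32_t id[4] = { 10, 11, 12, 13 };
	bool init[4] = { true, true, true, true };
	uint32_t list[4];

	/* indices 0 and 2 are RX, so two entries land in the table */
	return fill_rqt_sketch(id, init, 4, list) == 2 ? 0 : 1;
}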
1334 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
1335 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1340 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1347 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
1348 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
1362 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1369 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
1370 struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_cb()
1378 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
1379 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
1382 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
1390 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
1391 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_ready()
1400 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
1401 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
1416 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
1417 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
1434 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
1473 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_features() local
1477 ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); in mlx5_vdpa_get_features()
1479 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); in mlx5_vdpa_get_features()
1480 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); in mlx5_vdpa_get_features()
1481 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_features()
1482 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_features()
1507 static int setup_virtqueues(struct mlx5_vdpa_net *ndev) in setup_virtqueues() argument
1512 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) { in setup_virtqueues()
1513 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1522 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1527 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
1532 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
1533 mvq = &ndev->vqs[i]; in teardown_virtqueues()
1537 teardown_vq(ndev, mvq); in teardown_virtqueues()
1556 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_features() local
1565 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_features()
1566 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); in mlx5_vdpa_set_features()
1567 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_set_features()
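Feature negotiation follows the virtio contract: mlx5_vdpa_get_features() above translates device capabilities to virtio bits (via the driver's mlx_to_vritio_features() helper, typo and all) and unconditionally offers VIRTIO_F_VERSION_1 and VIRTIO_F_ACCESS_PLATFORM; mlx5_vdpa_set_features() then masks the guest's acked set against that offer to produce actual_features. A standalone sketch of the masking, with the two bit numbers taken from the virtio spec:

#include <stdint.h>

#define BIT_ULL(n)			(1ULL << (n))
#define VIRTIO_F_VERSION_1		32	/* virtio 1.0 compliance */
#define VIRTIO_F_ACCESS_PLATFORM	33	/* DMA goes through the platform IOMMU */

static uint64_t negotiate(uint64_t mlx_features, uint64_t driver_features)
{
	/* offered unconditionally, as in mlx5_vdpa_get_features() above */
	mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
	mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);

	/* actual_features, as in mlx5_vdpa_set_features() above */
	return driver_features & mlx_features;
}

int main(void)
{
	uint64_t acked = BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(0);

	/* a bit the device never offered (bit 0 here) is dropped */
	return negotiate(0, acked) == BIT_ULL(VIRTIO_F_VERSION_1) ? 0 : 1;
}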
1596 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
1598 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
1599 return ndev->mvdev.status; in mlx5_vdpa_get_status()
1602 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
1611 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
1627 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
1631 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
1632 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
1633 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
1638 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
1642 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
1643 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
1646 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
1652 mlx5_clear_vqs(ndev); in restore_channels_info()
1653 init_mvqs(ndev); in restore_channels_info()
1654 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
1655 mvq = &ndev->vqs[i]; in restore_channels_info()
1671 static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb) in mlx5_vdpa_change_map() argument
1675 suspend_vqs(ndev); in mlx5_vdpa_change_map()
1676 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
1680 teardown_driver(ndev); in mlx5_vdpa_change_map()
1681 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1682 err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb); in mlx5_vdpa_change_map()
1686 if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
1689 restore_channels_info(ndev); in mlx5_vdpa_change_map()
1690 err = setup_driver(ndev); in mlx5_vdpa_change_map()
1697 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
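mlx5_vdpa_change_map() above is the delicate path: when the memory map changes, the virtqueues are suspended and their state saved before the hardware objects and the old memory key are destroyed, a new key is created over the new IOTLB, and only if the device had already reached DRIVER_OK are the queues restored and the driver set up again. The ordering, sketched with stub helpers (the real function also unwinds on error around the MR swap):

static void suspend_vqs_stub(void)		{ }
static int  save_channels_info_stub(void)	{ return 0; }
static void teardown_driver_stub(void)		{ }
static void destroy_mr_stub(void)		{ }
static int  create_mr_stub(void)		{ return 0; }
static int  driver_ok_stub(void)		{ return 1; }
static void restore_channels_info_stub(void)	{ }
static int  setup_driver_stub(void)		{ return 0; }

static int change_map_sketch(void)
{
	int err;

	suspend_vqs_stub();			/* quiesce the queues */
	err = save_channels_info_stub();	/* capture indices before teardown */
	if (err)
		return err;
	teardown_driver_stub();			/* drop HW objects using the old mkey */
	destroy_mr_stub();
	err = create_mr_stub();			/* new mkey over the new IOTLB */
	if (err)
		return err;
	if (!driver_ok_stub())			/* not DRIVER_OK: nothing to rebuild */
		return 0;
	restore_channels_info_stub();		/* seed fresh vqs from saved state */
	return setup_driver_stub();
}

int main(void)
{
	return change_map_sketch();
}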
1702 static int setup_driver(struct mlx5_vdpa_net *ndev) in setup_driver() argument
1706 mutex_lock(&ndev->reslock); in setup_driver()
1707 if (ndev->setup) { in setup_driver()
1708 mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n"); in setup_driver()
1712 err = setup_virtqueues(ndev); in setup_driver()
1714 mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n"); in setup_driver()
1718 err = create_rqt(ndev); in setup_driver()
1720 mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n"); in setup_driver()
1724 err = create_tir(ndev); in setup_driver()
1726 mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n"); in setup_driver()
1730 err = add_fwd_to_tir(ndev); in setup_driver()
1732 mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n"); in setup_driver()
1735 ndev->setup = true; in setup_driver()
1736 mutex_unlock(&ndev->reslock); in setup_driver()
1741 destroy_tir(ndev); in setup_driver()
1743 destroy_rqt(ndev); in setup_driver()
1745 teardown_virtqueues(ndev); in setup_driver()
1747 mutex_unlock(&ndev->reslock); in setup_driver()
1751 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
1753 mutex_lock(&ndev->reslock); in teardown_driver()
1754 if (!ndev->setup) in teardown_driver()
1757 remove_fwd_to_tir(ndev); in teardown_driver()
1758 destroy_tir(ndev); in teardown_driver()
1759 destroy_rqt(ndev); in teardown_driver()
1760 teardown_virtqueues(ndev); in teardown_driver()
1761 ndev->setup = false; in teardown_driver()
1763 mutex_unlock(&ndev->reslock); in teardown_driver()
1766 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
1770 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
1771 ndev->vqs[i].ready = false; in clear_vqs_ready()
1777 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
1783 teardown_driver(ndev); in mlx5_vdpa_set_status()
1784 clear_vqs_ready(ndev); in mlx5_vdpa_set_status()
1785 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1786 ndev->mvdev.status = 0; in mlx5_vdpa_set_status()
1787 ndev->mvdev.mlx_features = 0; in mlx5_vdpa_set_status()
1792 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
1794 err = setup_driver(ndev); in mlx5_vdpa_set_status()
1805 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
1809 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1810 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
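mlx5_vdpa_set_status() above implements the two interesting virtio status transitions: writing 0 is a device reset (teardown_driver(), clear every queue's ready flag, drop the memory key, zero status and features), and a 0 -> 1 flip of DRIVER_OK triggers setup_driver(), raising VIRTIO_CONFIG_S_FAILED if that fails. A sketch of the branch structure; the status bit values are from the virtio spec:

#include <stdint.h>

#define VIRTIO_CONFIG_S_DRIVER_OK	4
#define VIRTIO_CONFIG_S_FAILED		128

static void reset_stub(void)	{ }	/* teardown + clear ready flags + destroy MR */
static int  setup_stub(void)	{ return 0; }

static void set_status_sketch(uint8_t *cur, uint8_t status)
{
	if (!status) {			/* device reset */
		reset_stub();
		*cur = 0;
		return;
	}

	if (((status ^ *cur) & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		if (setup_stub()) {
			*cur |= VIRTIO_CONFIG_S_FAILED;
			return;
		}
	}
	*cur = status;
}

int main(void)
{
	uint8_t st = 0;

	set_status_sketch(&st, VIRTIO_CONFIG_S_DRIVER_OK);	/* brings driver up */
	set_status_sketch(&st, 0);				/* resets the device */
	return st;
}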
1817 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
1820 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
1839 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
1850 return mlx5_vdpa_change_map(ndev, iotlb); in mlx5_vdpa_set_map()
1859 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
1861 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
1863 free_resources(ndev); in mlx5_vdpa_free()
1864 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
1866 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
1868 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
1869 mutex_destroy(&ndev->reslock); in mlx5_vdpa_free()
1924 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
1926 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
1930 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
1934 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
1938 err = create_tis(ndev); in alloc_resources()
1947 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
1951 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
1953 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
1958 destroy_tis(ndev); in free_resources()
1959 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
1963 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
1968 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { in init_mvqs()
1969 mvq = &ndev->vqs[i]; in init_mvqs()
1972 mvq->ndev = ndev; in init_mvqs()
1975 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
1976 mvq = &ndev->vqs[i]; in init_mvqs()
1979 mvq->ndev = ndev; in init_mvqs()
1988 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_add_dev() local
1996 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_add_dev()
1998 if (IS_ERR(ndev)) in mlx5_vdpa_add_dev()
1999 return ndev; in mlx5_vdpa_add_dev()
2001 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_add_dev()
2002 mvdev = &ndev->mvdev; in mlx5_vdpa_add_dev()
2004 init_mvqs(ndev); in mlx5_vdpa_add_dev()
2005 mutex_init(&ndev->reslock); in mlx5_vdpa_add_dev()
2006 config = &ndev->config; in mlx5_vdpa_add_dev()
2007 err = query_mtu(mdev, &ndev->mtu); in mlx5_vdpa_add_dev()
2023 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2027 err = alloc_resources(ndev); in mlx5_vdpa_add_dev()
2035 return ndev; in mlx5_vdpa_add_dev()
2038 free_resources(ndev); in mlx5_vdpa_add_dev()
2040 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2045 mutex_destroy(&ndev->reslock); in mlx5_vdpa_add_dev()