Lines Matching full:ndev

114 	struct mlx5_vdpa_net *ndev;  member
146 static void free_resources(struct mlx5_vdpa_net *ndev);
147 static void init_mvqs(struct mlx5_vdpa_net *ndev);
148 static int setup_driver(struct mlx5_vdpa_net *ndev);
149 static void teardown_driver(struct mlx5_vdpa_net *ndev);
237 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
239 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
245 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
246 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
253 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
255 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
261 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
268 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
269 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
281 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
285 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
286 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
289 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
291 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
329 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
337 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
351 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
353 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
363 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
365 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
367 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
370 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
372 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
375 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
378 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
387 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
391 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
403 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
407 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
417 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
427 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
430 rq_buf_free(ndev, vqp); in qp_create()
435 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
441 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
442 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
443 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
445 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
446 rq_buf_free(ndev, vqp); in qp_destroy()
483 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
484 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
507 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
509 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
510 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
511 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
529 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
543 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
559 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
578 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
580 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
584 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
586 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
587 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
591 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
594 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
595 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
598 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
601 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in set_umem_size()
625 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
627 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
630 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
640 set_umem_size(ndev, mvq, num, &umem); in create_umem()
641 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
654 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
662 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
664 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
676 umem_frag_buf_free(ndev, umem); in create_umem()
680 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
700 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
703 umem_frag_buf_free(ndev, umem); in umem_destroy()
706 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
712 err = create_umem(ndev, mvq, num); in umems_create()
720 umem_destroy(ndev, mvq, num); in umems_create()
725 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
730 umem_destroy(ndev, mvq, num); in umems_destroy()
733 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
737 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
761 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
771 err = umems_create(ndev, mvq); in create_virtqueue()
785 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
791 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
793 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
796 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
803 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
807 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); in create_virtqueue()
814 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
815 if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) in create_virtqueue()
818 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
830 umems_destroy(ndev, mvq); in create_virtqueue()
834 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
842 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
845 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
846 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
849 umems_destroy(ndev, mvq); in destroy_virtqueue()
862 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
878 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
890 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
907 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
925 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
957 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
965 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
969 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
974 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
978 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
982 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
986 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
990 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
994 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
998 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1002 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
1011 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1030 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1031 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1048 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1066 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1072 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1080 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1089 mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n"); in setup_vq()
1093 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1097 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1101 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1105 err = connect_qps(ndev, mvq); in setup_vq()
1109 err = create_virtqueue(ndev, mvq); in setup_vq()
1114 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1116 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1126 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1128 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1130 cq_destroy(ndev, idx); in setup_vq()
1134 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1144 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1145 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1147 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1148 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1155 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1160 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1163 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1168 suspend_vq(ndev, mvq); in teardown_vq()
1169 destroy_virtqueue(ndev, mvq); in teardown_vq()
1170 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1171 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1172 cq_destroy(ndev, mvq->index); in teardown_vq()
1176 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1186 log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in create_rqt()
1195 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1202 for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) { in create_rqt()
1203 if (!ndev->vqs[j].initialized) in create_rqt()
1206 if (!vq_is_tx(ndev->vqs[j].index)) { in create_rqt()
1207 list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); in create_rqt()
1212 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1220 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1222 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1225 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1245 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1259 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1260 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1262 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1267 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1269 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1272 static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev) in add_fwd_to_tir() argument
1284 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in add_fwd_to_tir()
1286 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); in add_fwd_to_tir()
1290 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in add_fwd_to_tir()
1291 if (IS_ERR(ndev->rxft)) in add_fwd_to_tir()
1292 return PTR_ERR(ndev->rxft); in add_fwd_to_tir()
1294 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_fwd_to_tir()
1295 if (IS_ERR(ndev->rx_counter)) { in add_fwd_to_tir()
1296 err = PTR_ERR(ndev->rx_counter); in add_fwd_to_tir()
1302 dest[0].tir_num = ndev->res.tirn; in add_fwd_to_tir()
1304 dest[1].counter_id = mlx5_fc_id(ndev->rx_counter); in add_fwd_to_tir()
1305 ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2); in add_fwd_to_tir()
1306 if (IS_ERR(ndev->rx_rule)) { in add_fwd_to_tir()
1307 err = PTR_ERR(ndev->rx_rule); in add_fwd_to_tir()
1308 ndev->rx_rule = NULL; in add_fwd_to_tir()
1315 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in add_fwd_to_tir()
1317 mlx5_destroy_flow_table(ndev->rxft); in add_fwd_to_tir()
1321 static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev) in remove_fwd_to_tir() argument
1323 if (!ndev->rx_rule) in remove_fwd_to_tir()
1326 mlx5_del_flow_rules(ndev->rx_rule); in remove_fwd_to_tir()
1327 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in remove_fwd_to_tir()
1328 mlx5_destroy_flow_table(ndev->rxft); in remove_fwd_to_tir()
1330 ndev->rx_rule = NULL; in remove_fwd_to_tir()
1336 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
1337 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1342 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1349 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
1350 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1361 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
1364 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1371 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
1372 struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_cb()
1380 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
1381 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
1384 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
1392 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
1393 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_ready()
1402 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
1403 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
1418 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
1419 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
1436 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
1475 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_features() local
1479 ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); in mlx5_vdpa_get_features()
1481 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); in mlx5_vdpa_get_features()
1482 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); in mlx5_vdpa_get_features()
1483 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_features()
1484 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_features()
1495 static int setup_virtqueues(struct mlx5_vdpa_net *ndev) in setup_virtqueues() argument
1500 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) { in setup_virtqueues()
1501 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1510 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1515 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
1520 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
1521 mvq = &ndev->vqs[i]; in teardown_virtqueues()
1525 teardown_vq(ndev, mvq); in teardown_virtqueues()
1544 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_features() local
1553 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_features()
1554 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); in mlx5_vdpa_set_features()
1555 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_set_features()
1584 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
1586 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
1587 return ndev->mvdev.status; in mlx5_vdpa_get_status()
1590 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
1599 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
1615 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
1619 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
1620 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
1621 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
1626 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
1630 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
1631 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
1634 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
1640 mlx5_clear_vqs(ndev); in restore_channels_info()
1641 init_mvqs(ndev); in restore_channels_info()
1642 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
1643 mvq = &ndev->vqs[i]; in restore_channels_info()
1659 static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb) in mlx5_vdpa_change_map() argument
1663 suspend_vqs(ndev); in mlx5_vdpa_change_map()
1664 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
1668 teardown_driver(ndev); in mlx5_vdpa_change_map()
1669 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1670 err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb); in mlx5_vdpa_change_map()
1674 if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
1677 restore_channels_info(ndev); in mlx5_vdpa_change_map()
1678 err = setup_driver(ndev); in mlx5_vdpa_change_map()
1685 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1690 static int setup_driver(struct mlx5_vdpa_net *ndev) in setup_driver() argument
1694 mutex_lock(&ndev->reslock); in setup_driver()
1695 if (ndev->setup) { in setup_driver()
1696 mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n"); in setup_driver()
1700 err = setup_virtqueues(ndev); in setup_driver()
1702 mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n"); in setup_driver()
1706 err = create_rqt(ndev); in setup_driver()
1708 mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n"); in setup_driver()
1712 err = create_tir(ndev); in setup_driver()
1714 mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n"); in setup_driver()
1718 err = add_fwd_to_tir(ndev); in setup_driver()
1720 mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n"); in setup_driver()
1723 ndev->setup = true; in setup_driver()
1724 mutex_unlock(&ndev->reslock); in setup_driver()
1729 destroy_tir(ndev); in setup_driver()
1731 destroy_rqt(ndev); in setup_driver()
1733 teardown_virtqueues(ndev); in setup_driver()
1735 mutex_unlock(&ndev->reslock); in setup_driver()
1739 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
1741 mutex_lock(&ndev->reslock); in teardown_driver()
1742 if (!ndev->setup) in teardown_driver()
1745 remove_fwd_to_tir(ndev); in teardown_driver()
1746 destroy_tir(ndev); in teardown_driver()
1747 destroy_rqt(ndev); in teardown_driver()
1748 teardown_virtqueues(ndev); in teardown_driver()
1749 ndev->setup = false; in teardown_driver()
1751 mutex_unlock(&ndev->reslock); in teardown_driver()
1754 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
1758 for (i = 0; i < ndev->mvdev.max_vqs; i++) in clear_vqs_ready()
1759 ndev->vqs[i].ready = false; in clear_vqs_ready()
1765 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
1771 teardown_driver(ndev); in mlx5_vdpa_set_status()
1772 clear_vqs_ready(ndev); in mlx5_vdpa_set_status()
1773 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1774 ndev->mvdev.status = 0; in mlx5_vdpa_set_status()
1775 ndev->mvdev.mlx_features = 0; in mlx5_vdpa_set_status()
1780 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
1782 err = setup_driver(ndev); in mlx5_vdpa_set_status()
1793 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
1797 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1798 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
1805 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
1808 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
1827 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
1838 return mlx5_vdpa_change_map(ndev, iotlb); in mlx5_vdpa_set_map()
1847 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
1849 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
1851 free_resources(ndev); in mlx5_vdpa_free()
1852 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
1854 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
1856 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
1857 mutex_destroy(&ndev->reslock); in mlx5_vdpa_free()
1912 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
1914 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
1918 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
1922 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
1926 err = create_tis(ndev); in alloc_resources()
1935 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
1939 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
1941 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
1946 destroy_tis(ndev); in free_resources()
1947 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
1951 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
1956 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { in init_mvqs()
1957 mvq = &ndev->vqs[i]; in init_mvqs()
1960 mvq->ndev = ndev; in init_mvqs()
1963 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
1964 mvq = &ndev->vqs[i]; in init_mvqs()
1967 mvq->ndev = ndev; in init_mvqs()
1976 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_add_dev() local
1984 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_add_dev()
1986 if (IS_ERR(ndev)) in mlx5_vdpa_add_dev()
1987 return ndev; in mlx5_vdpa_add_dev()
1989 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_add_dev()
1990 mvdev = &ndev->mvdev; in mlx5_vdpa_add_dev()
1992 init_mvqs(ndev); in mlx5_vdpa_add_dev()
1993 mutex_init(&ndev->reslock); in mlx5_vdpa_add_dev()
1994 config = &ndev->config; in mlx5_vdpa_add_dev()
1995 err = query_mtu(mdev, &ndev->mtu); in mlx5_vdpa_add_dev()
2011 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2015 err = alloc_resources(ndev); in mlx5_vdpa_add_dev()
2023 return ndev; in mlx5_vdpa_add_dev()
2026 free_resources(ndev); in mlx5_vdpa_add_dev()
2028 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
2033 mutex_destroy(&ndev->reslock); in mlx5_vdpa_add_dev()