// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

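/* Fill driver-specific CQ attributes (depth, consumer index, CQE size and
 * arm sequence number) into a nested RDMA_NLDEV_ATTR_DRIVER table for the
 * rdma netlink resource tracking (restrack) interface.
 */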
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

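/* Dump the raw CQ context (CQC) read back from hardware through the
 * query_cqc hw op as an RDMA_NLDEV_ATTR_RES_RAW blob.
 */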
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

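/* Fill driver-specific QP attributes (SQ/RQ WQE counts, max scatter/gather
 * entries and extended SGE count) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */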
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

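/* Dump the raw QP context (QPC) read back from hardware through the
 * query_qpc hw op as an RDMA_NLDEV_ATTR_RES_RAW blob.
 */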
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_v2_qp_context context;
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

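/* Fill driver-specific MR attributes (PBL hop number, base-address and
 * buffer page shifts) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */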
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

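/* Dump the raw MPT (memory protection table) entry read back from hardware
 * through the query_mpt hw op as an RDMA_NLDEV_ATTR_RES_RAW blob.
 */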
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}