// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/math64.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)

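/* Common head shared by all police cmsgs: flag options and the ingress
 * port the policer applies to.
 */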
struct nfp_police_cfg_head {
	__be32 flags_opts;
	__be32 port;
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Flag options                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                       Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                  Committed Information Rate                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

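/* Stats reply cmsg from the firmware: passed and dropped packet and
 * byte counters for one policed port.
 */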
struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

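/* Validate and offload a matchall police action as a rate limiter.
 * Only a single police action, installed at the highest priority on a
 * non-shared block of a VF representor, is supported. The committed
 * and peak parameters of the trTCM are both programmed with the same
 * rate and burst taken from the policer.
 */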
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *action = &flow->rule->action.entries[0];
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;
	u32 burst;
	u64 rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(&flow->rule->action)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (flow->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	if (action->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	rate = action->police.rate_bytes_ps;
	burst = action->police.burst;
	netdev_port_id = nfp_repr_get_port_id(netdev);

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}

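/* Remove an offloaded rate limiter. Clears the cached qos state for
 * the representor, stops the stats worker when the last limiter goes
 * away, and sends a delete cmsg carrying only the port ID.
 */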
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Clear all qos associated data for this interface */
	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

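/* Handle a stats reply cmsg from the firmware. Accumulates the passed
 * and dropped counters into the per-representor stats cache under the
 * qos stats lock.
 */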
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

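/* Request policer stats for a single port. Runs from the stats worker,
 * hence the atomic cmsg allocation.
 */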
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 netdev_port_id)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;

	head = nfp_flower_cmsg_get_data(skb);
	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	head->port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

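/* Walk all VF representors under RCU and request stats for each port
 * that has a rate limiter installed.
 */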
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}

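/* Periodic worker refreshing the cached policer stats. Re-arms itself
 * every NFP_FL_QOS_UPDATE interval while rate limiters are installed.
 */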
static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

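/* Report policer stats back to TC as the delta since the last query,
 * taken from the cached counters filled in by the stats worker.
 */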
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
			  repr_priv->qos_table.last_update,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}

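/* Initialise qos offload state: the stats lock and the delayed stats
 * worker.
 */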
void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

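/* Stop the stats worker when the flower app is torn down. */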
void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

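/* Entry point for TC matchall (qos) offload requests. Dispatches
 * replace, destroy and stats commands to the handlers above, provided
 * the loaded firmware advertises VF rate limiting support.
 *
 * Illustrative usage from userspace (exact tc syntax may vary, and the
 * interface name is only an example):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: prio 1 matchall \
 *           action police rate 100mbit burst 10k
 */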
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}