/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s): Guvenc Gulce
 */

#ifndef NET_SMC_SMC_STATS_H_
#define NET_SMC_SMC_STATS_H_
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>

#include "smc_clc.h"

/* max number of distinct fallback reason codes tracked per side */
#define SMC_MAX_FBACK_RSN_CNT 30

/*
 * Buckets of the buffer/payload size histograms: <=8K, <=16K, ...,
 * <=1024K, plus one bucket for everything greater than 1024K.
 */
enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,	/* sizes greater than 1024K */
	SMC_BUF_MAX,		/* number of histogram buckets */
};

/* one fallback reason code and how often it was recorded */
struct smc_stats_fback {
	int	fback_code;	/* fallback reason code */
	u16	count;		/* occurrences of this reason code */
};

/* fallback statistics, tracked separately for server and client side */
struct smc_stats_rsn {
	struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
	struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
	u64	srv_fback_cnt;	/* total server-side fallbacks */
	u64	clnt_fback_cnt;	/* total client-side fallbacks */
};

/*
 * Per-direction buffer (RMB) event counters; each field is bumped via
 * the corresponding SMC_STAT_RMB_* wrapper macro below.
 */
struct smc_stats_rmbcnt {
	u64	buf_size_small_peer_cnt; /* SMC_STAT_RMB_TX_PEER_SIZE_SMALL() */
	u64	buf_size_small_cnt;	/* SMC_STAT_RMB_{TX,RX}_SIZE_SMALL() */
	u64	buf_full_peer_cnt;	/* SMC_STAT_RMB_TX_PEER_FULL() */
	u64	buf_full_cnt;		/* SMC_STAT_RMB_{TX,RX}_FULL() */
	u64	reuse_cnt;		/* SMC_STAT_BUF_REUSE() */
	u64	alloc_cnt;		/* SMC_STAT_RMB_ALLOC() */
	u64	dgrade_cnt;		/* SMC_STAT_RMB_DOWNGRADED() */
};

/* size histogram, one slot per SMC_BUF_* bucket */
struct smc_stats_memsize {
	u64	buf[SMC_BUF_MAX];
};

/* all counters kept for one technology (SMC-D or SMC-R) */
struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;	/* tx buffer size histogram */
	struct smc_stats_memsize rx_rmbsize;	/* rx buffer size histogram */
	struct smc_stats_memsize tx_pd;		/* tx payload size histogram */
	struct smc_stats_memsize rx_pd;		/* rx payload size histogram */
	struct smc_stats_rmbcnt rmb_tx;		/* tx buffer event counters */
	struct smc_stats_rmbcnt rmb_rx;		/* rx buffer event counters */
	u64	clnt_v1_succ_cnt;	/* successful SMCv1 client handshakes */
	u64	clnt_v2_succ_cnt;	/* successful SMCv2 client handshakes */
	u64	srv_v1_succ_cnt;	/* successful SMCv1 server handshakes */
	u64	srv_v2_succ_cnt;	/* successful SMCv2 server handshakes */
	u64	sendpage_cnt;
	u64	urg_data_cnt;		/* urgent data events */
	u64	splice_cnt;
	u64	cork_cnt;
	u64	ndly_cnt;
	u64	rx_bytes;		/* total bytes received */
	u64	tx_bytes;		/* total bytes sent */
	u64	rx_cnt;			/* number of rx operations */
	u64	tx_cnt;			/* number of tx operations */
};

/*
 * Top-level SMC statistics, kept per cpu (see the __percpu users below);
 * smc[] is indexed by technology, SMC_TYPE_D or SMC_TYPE_R.
 */
struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64			clnt_hshake_err_cnt;	/* client handshake failures */
	u64			srv_hshake_err_cnt;	/* server handshake failures */
};

/*
 * Account one tx/rx operation of _len bytes with result _rc for
 * technology _tech: always bump <key>_cnt; if data was moved
 * (_rc > 0 and _len > 0), add _rc to <key>_bytes and bump the
 * <key>_pd histogram bucket for _len.
 * Bucket index: fls64((_len - 1) >> 13) maps _len <= 8K (2^13) to
 * bucket 0 (SMC_BUF_8K), each doubling to the next bucket, clamped
 * to the last bucket (SMC_BUF_G_1024K).
 * Arguments are captured in typeof() locals so each is evaluated
 * exactly once; "break" leaves the do/while(0) early.
 */
#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos; \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*stats).smc[t].key ## _cnt); \
	if (r <= 0 || l <= 0) \
		break; \
	_pos = fls64((l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
while (0)

/*
 * Account a tx operation on socket _smc; a connection without a link
 * (conn.lnk == NULL) is counted as SMC-D, otherwise as SMC-R.
 */
#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)

/* rx counterpart of SMC_STAT_TX_PAYLOAD() */
#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)

/*
 * Bump the <k>_rmbsize histogram bucket for a buffer of _len bytes;
 * same bucket math as SMC_STAT_PAYLOAD_SUB, but with 32-bit fls()
 * (buffer sizes fit in an int); non-positive lengths are ignored.
 */
#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
do { \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos; \
	int m = SMC_BUF_MAX - 1; \
	if (_l <= 0) \
		break; \
	_pos = fls((_l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)

/* bump smc[t].rmb_<key>.<type>_cnt, e.g. rmb_tx.alloc_cnt */
#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)

/*
 * Record the size of a new tx/rx buffer for socket _smc in the
 * matching (SMC-D/SMC-R x rx/tx) histogram.
 */
#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
do { \
	struct net *_net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
} \
while (0)

/*
 * Count a buffer event of the given type (alloc, reuse, ...) for
 * socket _smc in the matching (SMC-D/SMC-R x rx/tx) counter set.
 */
#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
do { \
	struct net *net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
} \
while (0)

/* convenience wrappers, one per rmbcnt field (see struct smc_stats_rmbcnt) */

#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
SMC_STAT_RMB(smc, buf_full, is_smcd, true)

/*
 * Bump the named counter field (e.g. cork_cnt) for socket _smc;
 * technology is chosen by link presence as in SMC_STAT_TX_PAYLOAD().
 */
#define SMC_STAT_INC(_smc, type) \
do { \
	typeof(_smc) __smc = _smc; \
	bool is_smcd = !(__smc)->conn.lnk; \
	struct net *net = sock_net(&(__smc)->sk); \
	struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)

/*
 * Count a successful client handshake; version (v1/v2) comes from the
 * accept CLC message header, technology from its typev1 field.
 */
#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)

/*
 * Count a successful server handshake; version and technology are
 * taken from the connection init info _ini.
 */
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_v2 = (i->smcd_version & SMC_V2); \
	bool is_smcd = (i->is_smcd); \
	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)

/* netlink dump of the counters above */
int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
/* netlink dump of the fallback reason statistics */
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
/* per-netns setup/teardown of the percpu stats */
int smc_stats_init(struct net *net);
void smc_stats_exit(struct net *net);

#endif /* NET_SMC_SMC_STATS_H_ */