// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express target device driver tracepoints
 * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
 */

#include <asm/unaligned.h>
#include "trace.h"

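/*
 * Each decoder below formats the 24 command-specific bytes of the SQE
 * (dwords 10 through 15, passed in as "cdw10" or "spc") into the trace_seq
 * buffer, NUL-terminates the string and returns a pointer to its start so
 * the tracepoint can print it with "%s".
 */

/* Identify: CNS in CDW10 bits 7:0, controller ID (CNTID) in bits 31:16. */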
static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u8 cns = cdw10[0];
        u16 ctrlid = get_unaligned_le16(cdw10 + 2);

        trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
        trace_seq_putc(p, 0);

        return ret;
}

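/* Get Features: FID in CDW10 bits 7:0, SEL in bits 10:8, plus raw CDW11. */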
static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
                u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u8 fid = cdw10[0];
        u8 sel = cdw10[1] & 0x7;
        u32 cdw11 = get_unaligned_le32(cdw10 + 4);

        trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
        trace_seq_putc(p, 0);

        return ret;
}

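/* Get LBA Status: SLBA (CDW10-11), MNDW (CDW12), RL and ATYPE (CDW13). */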
static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
                u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u64 slba = get_unaligned_le64(cdw10);
        u32 mndw = get_unaligned_le32(cdw10 + 8);
        u16 rl = get_unaligned_le16(cdw10 + 12);
        u8 atype = cdw10[15];

        trace_seq_printf(p, "slba=0x%llx, mndw=0x%x, rl=0x%x, atype=%u",
                         slba, mndw, rl, atype);
        trace_seq_putc(p, 0);

        return ret;
}

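/*
 * Read/Write/Write Zeroes: starting LBA (CDW10-11), 0's based block count
 * and control flags (CDW12), dataset management hints (CDW13) and the
 * reference tag (CDW14).
 */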
static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u64 slba = get_unaligned_le64(cdw10);
        u16 length = get_unaligned_le16(cdw10 + 8);
        u16 control = get_unaligned_le16(cdw10 + 10);
        u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
        u32 reftag = get_unaligned_le32(cdw10 + 16);

        trace_seq_printf(p,
                         "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
                         slba, length, control, dsmgmt, reftag);
        trace_seq_putc(p, 0);

        return ret;
}

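/* Dataset Management: number of ranges (CDW10) and range attributes (CDW11). */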
static const char *nvmet_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);

        trace_seq_printf(p, "nr=%u, attributes=%u",
                         get_unaligned_le32(cdw10),
                         get_unaligned_le32(cdw10 + 4));
        trace_seq_putc(p, 0);

        return ret;
}

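/* Fallback: hex-dump all 24 command-specific bytes. */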
static const char *nvmet_trace_common(struct trace_seq *p, u8 *cdw10)
{
        const char *ret = trace_seq_buffer_ptr(p);

        trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
        trace_seq_putc(p, 0);

        return ret;
}

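/* Pick a decoder for an admin command based on its opcode. */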
const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
                u8 opcode, u8 *cdw10)
{
        switch (opcode) {
        case nvme_admin_identify:
                return nvmet_trace_admin_identify(p, cdw10);
        case nvme_admin_get_features:
                return nvmet_trace_admin_get_features(p, cdw10);
        case nvme_admin_get_lba_status:
                return nvmet_trace_get_lba_status(p, cdw10);
        default:
                return nvmet_trace_common(p, cdw10);
        }
}

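/* Pick a decoder for an NVM (I/O) command based on its opcode. */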
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
                u8 opcode, u8 *cdw10)
{
        switch (opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
        case nvme_cmd_write_zeroes:
                return nvmet_trace_read_write(p, cdw10);
        case nvme_cmd_dsm:
                return nvmet_trace_dsm(p, cdw10);
        default:
                return nvmet_trace_common(p, cdw10);
        }
}

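/* Fabrics Property Set: attribute (property size), offset and value. */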
static const char *nvmet_trace_fabrics_property_set(struct trace_seq *p,
                u8 *spc)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u8 attrib = spc[0];
        u32 ofst = get_unaligned_le32(spc + 4);
        u64 value = get_unaligned_le64(spc + 8);

        trace_seq_printf(p, "attrib=%u, ofst=0x%x, value=0x%llx",
                         attrib, ofst, value);
        trace_seq_putc(p, 0);
        return ret;
}

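/* Fabrics Connect: recfmt, qid, sqsize, cattr and keep-alive timeout. */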
static const char *nvmet_trace_fabrics_connect(struct trace_seq *p,
                u8 *spc)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u16 recfmt = get_unaligned_le16(spc);
        u16 qid = get_unaligned_le16(spc + 2);
        u16 sqsize = get_unaligned_le16(spc + 4);
        u8 cattr = spc[6];
        u32 kato = get_unaligned_le32(spc + 8);

        trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",
                         recfmt, qid, sqsize, cattr, kato);
        trace_seq_putc(p, 0);
        return ret;
}

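/* Fabrics Property Get: attribute (property size) and offset. */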
static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
                u8 *spc)
{
        const char *ret = trace_seq_buffer_ptr(p);
        u8 attrib = spc[0];
        u32 ofst = get_unaligned_le32(spc + 4);

        trace_seq_printf(p, "attrib=%u, ofst=0x%x", attrib, ofst);
        trace_seq_putc(p, 0);
        return ret;
}

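/* Fallback for fabrics commands: hex-dump the command-specific bytes. */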
static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
        const char *ret = trace_seq_buffer_ptr(p);

        trace_seq_printf(p, "specific=%*ph", 24, spc);
        trace_seq_putc(p, 0);
        return ret;
}

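/* Pick a decoder for a fabrics command based on its fctype. */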
const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
                u8 fctype, u8 *spc)
{
        switch (fctype) {
        case nvme_fabrics_type_property_set:
                return nvmet_trace_fabrics_property_set(p, spc);
        case nvme_fabrics_type_connect:
                return nvmet_trace_fabrics_connect(p, spc);
        case nvme_fabrics_type_property_get:
                return nvmet_trace_fabrics_property_get(p, spc);
        default:
                return nvmet_trace_fabrics_common(p, spc);
        }
}

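/* Prefix the trace output with "disk=<name>, " when a disk name is set. */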
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
{
        const char *ret = trace_seq_buffer_ptr(p);

        if (*name)
                trace_seq_printf(p, "disk=%s, ", name);
        trace_seq_putc(p, 0);

        return ret;
}

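/* Print the controller ID, or "_" while the controller is not yet known. */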
const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
{
        const char *ret = trace_seq_buffer_ptr(p);

        /*
         * XXX: We don't know the controller instance before the connect
         * command itself has been executed, because the connect command for
         * the admin queue does not provide the cntlid; it is allocated by
         * that very command. For I/O queues, the controller instance is
         * mapped via the extra data of the connect command. If the extra
         * data of the connect command were available at this stage, this
         * print statement could be updated.
         */
        if (ctrl)
                trace_seq_printf(p, "%d", ctrl->cntlid);
        else
                trace_seq_printf(p, "_");
        trace_seq_putc(p, 0);

        return ret;
}