/*
 * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "fnic_io.h"
#include "fnic.h"

unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;

/* static char *fnic_fc_ctlr_trace_buf_p; */

static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);


/*
 * fnic_trace_get_buf - Give a buffer pointer to the caller for filling in trace information
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and gives that memory location
 * to the caller to store the trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
	unsigned long fnic_buf_head;
	unsigned long flags;

	spin_lock_irqsave(&fnic_trace_lock, flags);

	/*
	 * Get the next available memory location for writing trace
	 * information at @wr_idx and increment @wr_idx
	 */
	fnic_buf_head =
		fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
	fnic_trace_entries.wr_idx++;

	/*
	 * If the trace buffer is full, wrap @wr_idx back to zero
	 */
	if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
		fnic_trace_entries.wr_idx = 0;

	/*
	 * If write index @wr_idx has caught up with read index @rd_idx,
	 * increment @rd_idx to move on to the next entry in the trace buffer
	 */
	if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
		fnic_trace_entries.rd_idx++;
		if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
			fnic_trace_entries.rd_idx = 0;
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return (fnic_trace_data_t *)fnic_buf_head;
}
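
/*
 * Usage sketch (illustrative only, not a call site in this file): a tracing
 * macro or helper elsewhere in the driver is assumed to fill the returned
 * slot with the fields that fnic_get_trace_data() below dumps; the local
 * names are hypothetical and the .val accessors assume a 64-bit kernel:
 *
 *	fnic_trace_data_t *td = fnic_trace_get_buf();
 *
 *	if (td) {
 *		td->timestamp.val = jiffies;
 *		td->fnaddr.val = (u64)(unsigned long)__builtin_return_address(0);
 *		td->host_no = host_no;
 *		td->tag = io_tag;
 *		td->data[0] = d0;
 *		td->data[1] = d1;
 *	}
 */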

/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It starts at the @rd_idx entry in
 * the log and processes the log until the end of the buffer, then gathers
 * from the beginning of the log and processes until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
	int rd_idx;
	int wr_idx;
	int len = 0;
	unsigned long flags;
	char str[KSYM_SYMBOL_LEN];
	struct timespec val;
	fnic_trace_data_t *tbp;

	spin_lock_irqsave(&fnic_trace_lock, flags);
	rd_idx = fnic_trace_entries.rd_idx;
	wr_idx = fnic_trace_entries.wr_idx;
	if (wr_idx < rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				  fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump the trace buffer entry to the memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				  (trace_max_pages * PAGE_SIZE * 3) - len,
				  "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				  "%16llx %16llx %16llx\n", val.tv_sec,
				  val.tv_nsec, str, tbp->host_no, tbp->tag,
				  tbp->data[0], tbp->data[1], tbp->data[2],
				  tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * If @rd_idx has reached the maximum number of trace
			 * entries, wrap it back to zero
			 */
			if (rd_idx > (fnic_max_trace_entries-1))
				rd_idx = 0;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until @rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	} else if (wr_idx > rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				  fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump the trace buffer entry to the memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				  (trace_max_pages * PAGE_SIZE * 3) - len,
				  "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				  "%16llx %16llx %16llx\n", val.tv_sec,
				  val.tv_nsec, str, tbp->host_no, tbp->tag,
				  tbp->data[0], tbp->data[1], tbp->data[2],
				  tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until @rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return len;
}
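
/*
 * Illustrative sketch (assumption, not code from this file): the debugfs read
 * path is expected to size the dump buffer the same way the snprintf() calls
 * above bound it, roughly along these lines; the buffer_len field and local
 * names are hypothetical:
 *
 *	fnic_dbgfs_t *prt;
 *
 *	prt = kzalloc(sizeof(*prt), GFP_KERNEL);
 *	if (prt) {
 *		prt->buffer = vmalloc(trace_max_pages * PAGE_SIZE * 3);
 *		if (prt->buffer) {
 *			memset(prt->buffer, 0,
 *			       trace_max_pages * PAGE_SIZE * 3);
 *			prt->buffer_len = fnic_get_trace_data(prt);
 *		}
 *	}
 */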

/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
			struct fnic_stats *stats)
{
	int len = 0;
	int buf_size = debug->buf_size;
	struct timespec val1, val2;

	len = snprintf(debug->debug_buffer + len, buf_size - len,
		  "------------------------------------------\n"
		  "\t\tIO Statistics\n"
		  "------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
		  "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
		  "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
		  "Number of Memory alloc Failures: %lld\n"
		  "Number of IOREQ Null: %lld\n"
		  "Number of SCSI cmd pointer Null: %lld\n",
		  (u64)atomic64_read(&stats->io_stats.active_ios),
		  (u64)atomic64_read(&stats->io_stats.max_active_ios),
		  (u64)atomic64_read(&stats->io_stats.num_ios),
		  (u64)atomic64_read(&stats->io_stats.io_completions),
		  (u64)atomic64_read(&stats->io_stats.io_failures),
		  (u64)atomic64_read(&stats->io_stats.io_not_found),
		  (u64)atomic64_read(&stats->io_stats.alloc_failures),
		  (u64)atomic64_read(&stats->io_stats.ioreq_null),
		  (u64)atomic64_read(&stats->io_stats.sc_null));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tAbort Statistics\n"
		  "------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Aborts: %lld\n"
		  "Number of Abort Failures: %lld\n"
		  "Number of Abort Driver Timeouts: %lld\n"
		  "Number of Abort FW Timeouts: %lld\n"
		  "Number of Abort IO NOT Found: %lld\n",
		  (u64)atomic64_read(&stats->abts_stats.aborts),
		  (u64)atomic64_read(&stats->abts_stats.abort_failures),
		  (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
		  (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
		  (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tTerminate Statistics\n"
		  "------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Terminates: %lld\n"
		  "Maximum Terminates: %lld\n"
		  "Number of Terminate Driver Timeouts: %lld\n"
		  "Number of Terminate FW Timeouts: %lld\n"
		  "Number of Terminate IO NOT Found: %lld\n"
		  "Number of Terminate Failures: %lld\n",
		  (u64)atomic64_read(&stats->term_stats.terminates),
		  (u64)atomic64_read(&stats->term_stats.max_terminates),
		  (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
		  (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
		  (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
		  (u64)atomic64_read(&stats->term_stats.terminate_failures));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tReset Statistics\n"
		  "------------------------------------------\n");

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Device Resets: %lld\n"
		  "Number of Device Reset Failures: %lld\n"
		  "Number of Device Reset Aborts: %lld\n"
		  "Number of Device Reset Timeouts: %lld\n"
		  "Number of Device Reset Terminates: %lld\n"
		  "Number of FW Resets: %lld\n"
		  "Number of FW Reset Completions: %lld\n"
		  "Number of FW Reset Failures: %lld\n"
		  "Number of Fnic Reset: %lld\n"
		  "Number of Fnic Reset Completions: %lld\n"
		  "Number of Fnic Reset Failures: %lld\n",
		  (u64)atomic64_read(&stats->reset_stats.device_resets),
		  (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
		  (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
		  (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
		  (u64)atomic64_read(
			  &stats->reset_stats.device_reset_terminates),
		  (u64)atomic64_read(&stats->reset_stats.fw_resets),
		  (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
		  (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
		  (u64)atomic64_read(&stats->reset_stats.fnic_resets),
		  (u64)atomic64_read(
			  &stats->reset_stats.fnic_reset_completions),
		  (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tFirmware Statistics\n"
		  "------------------------------------------\n");

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Active FW Requests %lld\n"
		  "Maximum FW Requests: %lld\n"
		  "Number of FW out of resources: %lld\n"
		  "Number of FW IO errors: %lld\n",
		  (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
		  (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
		  (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
		  (u64)atomic64_read(&stats->fw_stats.io_fw_errs));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tVlan Discovery Statistics\n"
		  "------------------------------------------\n");

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Number of Vlan Discovery Requests Sent %lld\n"
		  "Vlan Response Received with no FCF VLAN ID: %lld\n"
		  "No solicitations recvd after vlan set, expiry count: %lld\n"
		  "Flogi rejects count: %lld\n",
		  (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
		  (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
		  (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
		  (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "\n------------------------------------------\n"
		  "\t\tOther Important Statistics\n"
		  "------------------------------------------\n");

	jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
	jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		  "Last ISR time: %llu (%8lu.%8lu)\n"
		  "Last ACK time: %llu (%8lu.%8lu)\n"
		  "Number of ISRs: %lld\n"
		  "Maximum CQ Entries: %lld\n"
		  "Number of ACK index out of range: %lld\n"
		  "Number of data count mismatch: %lld\n"
		  "Number of FCPIO Timeouts: %lld\n"
		  "Number of FCPIO Aborted: %lld\n"
		  "Number of SGL Invalid: %lld\n"
		  "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
		  "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
		  "Number of Copy WQ Alloc Failures for IOs: %lld\n"
		  "Number of no icmnd itmf Completions: %lld\n"
		  "Number of QUEUE Fulls: %lld\n"
		  "Number of rport not ready: %lld\n"
		  "Number of receive frame errors: %lld\n",
		  (u64)stats->misc_stats.last_isr_time,
		  val1.tv_sec, val1.tv_nsec,
		  (u64)stats->misc_stats.last_ack_time,
		  val2.tv_sec, val2.tv_nsec,
		  (u64)atomic64_read(&stats->misc_stats.isr_count),
		  (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
		  (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
		  (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
		  (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
		  (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
		  (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
		  (u64)atomic64_read(
			  &stats->misc_stats.abts_cpwq_alloc_failures),
		  (u64)atomic64_read(
			  &stats->misc_stats.devrst_cpwq_alloc_failures),
		  (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
		  (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
		  (u64)atomic64_read(&stats->misc_stats.queue_fulls),
		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
		  (u64)atomic64_read(&stats->misc_stats.frame_errors));

	return len;
}

/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize the trace buffer data structure by allocating the required memory
 * and setting the page_offset information for every trace entry, by adding the
 * trace entry length to the previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
	unsigned long fnic_buf_head;
	int i;
	int err = 0;

	trace_max_pages = fnic_trace_max_pages;
	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
					  FNIC_ENTRY_SIZE_BYTES;

	fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
	if (!fnic_trace_buf_p) {
		printk(KERN_ERR PFX "Failed to allocate memory "
				  "for fnic_trace_buf_p\n");
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));

	fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
						  sizeof(unsigned long));
	if (!fnic_trace_entries.page_offset) {
		printk(KERN_ERR PFX "Failed to allocate memory for"
				  " page_offset\n");
		if (fnic_trace_buf_p) {
			vfree((void *)fnic_trace_buf_p);
			fnic_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_entries.page_offset, 0,
		  (fnic_max_trace_entries * sizeof(unsigned long)));
	fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
	fnic_buf_head = fnic_trace_buf_p;

	/*
	 * Set page_offset field of fnic_trace_entries struct by
	 * calculating memory location for every trace entry using
	 * length of each trace entry
	 */
	for (i = 0; i < fnic_max_trace_entries; i++) {
		fnic_trace_entries.page_offset[i] = fnic_buf_head;
		fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
	}
	err = fnic_trace_debugfs_init();
	if (err < 0) {
		pr_err("fnic: Failed to initialize debugfs for tracing\n");
		goto err_fnic_trace_debugfs_init;
	}
	pr_info("fnic: Successfully Initialized Trace Buffer\n");
	return err;
err_fnic_trace_debugfs_init:
	fnic_trace_free();
err_fnic_trace_buf_init:
	return err;
}
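
/*
 * Illustrative sketch (assumption, not shown in this file): module init and
 * exit are expected to pair fnic_trace_buf_init() with fnic_trace_free(),
 * roughly as below; example_init/example_exit are hypothetical names:
 *
 *	static int __init example_init(void)
 *	{
 *		int err = fnic_trace_buf_init();
 *
 *		if (err < 0)
 *			return err;
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		fnic_trace_free();
 *	}
 */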

/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
	fnic_tracing_enabled = 0;
	fnic_trace_debugfs_terminate();
	if (fnic_trace_entries.page_offset) {
		vfree((void *)fnic_trace_entries.page_offset);
		fnic_trace_entries.page_offset = NULL;
	}
	if (fnic_trace_buf_p) {
		vfree((void *)fnic_trace_buf_p);
		fnic_trace_buf_p = 0;
	}
	printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}

/*
 * fnic_fc_trace_init -
 * Initialize the trace buffer used to log fnic control frames
 * Description:
 * Initialize the trace buffer data structure by allocating the
 * required memory for the trace data as well as for the indexes.
 * The frame size is 256 bytes and
 * memory is allocated for 1024 entries of 256 bytes each.
 * page_offset (the index) is set to the address of the trace entry,
 * and each page_offset is initialized by adding the frame size
 * to the previous page_offset entry.
 */

int fnic_fc_trace_init(void)
{
	unsigned long fc_trace_buf_head;
	int err = 0;
	int i;

	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
				FC_TRC_SIZE_BYTES;
	fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
					fnic_fc_trace_max_pages * PAGE_SIZE);
	if (!fnic_fc_ctlr_trace_buf_p) {
		pr_err("fnic: Failed to allocate memory for "
		       "FC Control Trace Buf\n");
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}

	memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
			fnic_fc_trace_max_pages * PAGE_SIZE);

	/* Allocate memory for page offset */
	fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
						sizeof(unsigned long));
	if (!fc_trace_entries.page_offset) {
		pr_err("fnic: Failed to allocate memory for page_offset\n");
		if (fnic_fc_ctlr_trace_buf_p) {
			pr_err("fnic: Freeing FC Control Trace Buf\n");
			vfree((void *)fnic_fc_ctlr_trace_buf_p);
			fnic_fc_ctlr_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}
	memset((void *)fc_trace_entries.page_offset, 0,
	       (fc_trace_max_entries * sizeof(unsigned long)));

	fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
	fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;

	/*
	 * Set up the fc_trace_entries.page_offset field with the memory
	 * location for every trace entry
	 */
	for (i = 0; i < fc_trace_max_entries; i++) {
		fc_trace_entries.page_offset[i] = fc_trace_buf_head;
		fc_trace_buf_head += FC_TRC_SIZE_BYTES;
	}
	err = fnic_fc_trace_debugfs_init();
	if (err < 0) {
		pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
		goto err_fnic_fc_ctlr_trace_debugfs_init;
	}
	pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
	return err;

err_fnic_fc_ctlr_trace_debugfs_init:
	fnic_fc_trace_free();
err_fnic_fc_ctlr_trace_buf_init:
	return err;
}

/*
 * fnic_fc_trace_free - Free memory of the fnic FC_CTLR trace data structures.
 */
void fnic_fc_trace_free(void)
{
	fnic_fc_tracing_enabled = 0;
	fnic_fc_trace_debugfs_terminate();
	if (fc_trace_entries.page_offset) {
		vfree((void *)fc_trace_entries.page_offset);
		fc_trace_entries.page_offset = NULL;
	}
	if (fnic_fc_ctlr_trace_buf_p) {
		vfree((void *)fnic_fc_ctlr_trace_buf_p);
		fnic_fc_ctlr_trace_buf_p = 0;
	}
	pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}
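
/*
 * Layout note (inferred from how FC_TRC_SIZE_BYTES, FC_TRC_HEADER_SIZE and
 * FC_TRACE_ADDRESS() are used below; the macros themselves are defined
 * elsewhere in the driver): each FC trace entry is one fixed-size slot,
 *
 *	+----------------------------+------------------------------------+
 *	| struct fc_trace_hdr        | frame bytes at FC_TRACE_ADDRESS()  |
 *	| (FC_TRC_HEADER_SIZE bytes) | (up to FC_TRC_SIZE_BYTES - header) |
 *	+----------------------------+------------------------------------+
 */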

/*
 * fnic_fc_trace_set_data:
 *       Maintain the rd & wr indexes accordingly and store the data
 * Passed parameters:
 *       host_no: host number associated with the fnic
 *       frame_type: send frame, receive frame, or link event
 *       fc_frame: pointer to the fc_frame
 *       frame_len: length of the fc_frame
 * Description:
 *   This routine gets the next available wr_idx,
 *   copies all of the passed trace data to the buffer pointed to by wr_idx,
 *   and increments wr_idx. It also makes sure that we don't
 *   overwrite the entry that we are currently reading, and
 *   wraps around when we reach the maximum number of entries.
 * Return Value:
 *   It returns 0 for success or -1 for failure
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
				char *frame, u32 fc_trc_frame_len)
{
	unsigned long flags;
	struct fc_trace_hdr *fc_buf;
	unsigned long eth_fcoe_hdr_len;
	char *fc_trace;

	if (fnic_fc_tracing_enabled == 0)
		return 0;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);

	if (fnic_fc_trace_cleared == 1) {
		fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
		pr_info("fnic: Resetting the read idx\n");
		memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
				fnic_fc_trace_max_pages * PAGE_SIZE);
		fnic_fc_trace_cleared = 0;
	}

	fc_buf = (struct fc_trace_hdr *)
		fc_trace_entries.page_offset[fc_trace_entries.wr_idx];

	fc_trace_entries.wr_idx++;

	if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
		fc_trace_entries.wr_idx = 0;

	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		fc_trace_entries.rd_idx++;
		if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
			fc_trace_entries.rd_idx = 0;
	}

	fc_buf->time_stamp = CURRENT_TIME;
	fc_buf->host_no = host_no;
	fc_buf->frame_type = frame_type;

	fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);

	/* On the receive path we do not have the eth and fcoe headers at the
	 * trace entry point, so we stuff 0xff bytes there to keep the entry
	 * layout generic.
	 */
	if (frame_type == FNIC_FC_RECV) {
		eth_fcoe_hdr_len = sizeof(struct ethhdr) +
					sizeof(struct fcoe_hdr);
		memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
		/* Copy the rest of the data frame */
		memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
		min_t(u8, fc_trc_frame_len,
			(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
						- eth_fcoe_hdr_len)));
	} else {
		memcpy((char *)fc_trace, (void *)frame,
		min_t(u8, fc_trc_frame_len,
			(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
	}

	/* Store the actual received length */
	fc_buf->frame_len = fc_trc_frame_len;

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return 0;
}
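
/*
 * Usage sketch (illustrative only, not a call site in this file): a frame
 * receive path could record an incoming frame roughly like this, where
 * host_no, skb_data and skb_len stand in for the caller's own values:
 *
 *	if (fnic_fc_trace_set_data(host_no, FNIC_FC_RECV,
 *				   skb_data, skb_len) != 0)
 *		pr_err("fnic: failed to trace received frame\n");
 */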

/*
 * fnic_fc_trace_get_data: Copy the trace buffer to a memory file
 * Passed parameters:
 *       @fnic_dbgfs_t: pointer to debugfs trace buffer
 *       rdata_flag: 1 => unformatted file
 *                   0 => formatted file
 * Description:
 *       This routine copies the trace data to a memory file with
 *       proper formatting, or to another memory
 *       file without formatting for further processing.
 * Return Value:
 *       Number of bytes that were dumped into fnic_dbgfs_t
 */

int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
	int rd_idx, wr_idx;
	unsigned long flags;
	int len = 0, j;
	struct fc_trace_hdr *tdata;
	char *fc_trace;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);
	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
		pr_info("fnic: Buffer is empty\n");
		return 0;
	}
	rd_idx = fc_trace_entries.rd_idx;
	wr_idx = fc_trace_entries.wr_idx;
	if (rdata_flag == 0) {
		len += snprintf(fnic_dbgfs_prt->buffer + len,
			(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
			"Time Stamp (UTC)\t\t"
			"Host No:   F Type:  len:     FCoE_FRAME:\n");
	}

	while (rd_idx != wr_idx) {
		tdata = (struct fc_trace_hdr *)
			fc_trace_entries.page_offset[rd_idx];
		if (!tdata) {
			pr_info("fnic: Rd data is NULL\n");
			spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
			return 0;
		}
		if (rdata_flag == 0) {
			copy_and_format_trace_data(tdata,
				fnic_dbgfs_prt, &len, rdata_flag);
		} else {
			fc_trace = (char *)tdata;
			for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
				len += snprintf(fnic_dbgfs_prt->buffer + len,
				(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
				- len, "%02x", fc_trace[j] & 0xff);
			} /* for loop */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
				"\n");
		}
		rd_idx++;
		if (rd_idx > (fc_trace_max_entries - 1))
			rd_idx = 0;
	}

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return len;
}

/*
 * copy_and_format_trace_data: Copy formatted data to a char * buffer
 * Passed Parameters:
 *      @fc_trace_hdr_t: pointer to trace data
 *      @fnic_dbgfs_t: pointer to debugfs trace buffer
 *      @orig_len: pointer to len
 *      rdata_flag: 0 => formatted file, 1 => unformatted file
 * Description:
 *      This routine formats and copies the passed trace data
 *      into the formatted or unformatted file accordingly.
 */

void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
				fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
				u8 rdata_flag)
{
	struct tm tm;
	int j, i = 1, len;
	char *fc_trace, *fmt;
	int ethhdr_len = sizeof(struct ethhdr) - 1;
	int fcoehdr_len = sizeof(struct fcoe_hdr);
	int fchdr_len = sizeof(struct fc_frame_header);
	int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;

	tdata->frame_type = tdata->frame_type & 0x7F;

	len = *orig_len;

	time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);

	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x       %c%8x\t";
	len += snprintf(fnic_dbgfs_prt->buffer + len,
		max_size - len,
		fmt,
		tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
		tm.tm_hour, tm.tm_min, tm.tm_sec,
		tdata->time_stamp.tv_nsec, tdata->host_no,
		tdata->frame_type, tdata->frame_len);

	fc_trace = (char *)FC_TRACE_ADDRESS(tdata);

	for (j = 0; j < min_t(u8, tdata->frame_len,
		(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
		if (tdata->frame_type == FNIC_FC_LE) {
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, "%c", fc_trace[j]);
		} else {
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, "%02x", fc_trace[j] & 0xff);
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, " ");
			if (j == ethhdr_len ||
				j == ethhdr_len + fcoehdr_len ||
				j == ethhdr_len + fcoehdr_len + fchdr_len ||
				(i > 3 && j % fchdr_len == 0)) {
				len += snprintf(fnic_dbgfs_prt->buffer
					+ len, max_size - len,
					"\n\t\t\t\t\t\t\t\t");
				i++;
			}
		} /* end of else */
	} /* end of for loop */
	len += snprintf(fnic_dbgfs_prt->buffer + len,
		max_size - len, "\n");
	*orig_len = len;
}