// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"

struct etr_flat_buf {
	struct device	*dev;
	dma_addr_t	daddr;
	void		*vaddr;
	size_t		size;
};

/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (e.g., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer or table.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to a 4K page buffer.
 *	b10 - Normal entry, points to a 4K page buffer.
 *	b11 - Link. The address points to the base of the next table.
 */

typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3

#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
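
/*
 * Worked example: a 4K buffer page at physical address 0x80001000
 * encodes as ETR_SG_ENTRY(0x80001000, ETR_SG_ET_NORMAL)
 *	== ((0x80001000 >> 12) << 4) | 0x2 == 0x800012,
 * and ETR_SG_ADDR(0x800012) == (0x800012 >> 4) << 12 == 0x80001000.
 */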

/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:		Generic SG Table holding the data/table pages.
 * @hwaddr:		hardware address used by the TMC, which is the
 *			base address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};

/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping 1 entry, we could as
 * well replace the link entry of the previous page with the last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * address.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;
	return nr_sgpages + nr_sglinks;
}
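
/*
 * Example, assuming a 4K PAGE_SIZE (ETR_SG_PTRS_PER_PAGE == 1024):
 * mapping 1024 pages gives nr_sglinks = 1024 / 1023 = 1, but the
 * spill-over is a single entry, so it is folded back as the LAST
 * entry of the first table page and exactly 1024 entries are needed.
 */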

/*
 * tmc_pages_get_offset:  Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtually
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}

	return -EINVAL;
}

/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * For pages that were not allocated in tmc_pages_alloc() (i.e,
 * supplied by the caller), this simply drops the refcount taken
 * on them at allocation time.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(dev, tmc_pages->daddrs[i],
					 PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}

/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses is
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
					 GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
					 GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			/* Bail out on allocation failure rather than
			 * passing a NULL page to dma_map_page().
			 */
			if (!page)
				goto err;
		}
		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}

static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}

/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e, dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP,
				     PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}

static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP,
					    PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}

/*
 * tmc_alloc_sg_table: Allocate and set up dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table pages.
 *
 * @dev		- Device to which page should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for Data buffer.
 * @pages	- Optional list of virtual address of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
					int node,
					int nr_tpages,
					int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc  = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);
	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}

/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *dev = table->dev;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}
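
/*
 * Note that the page index is taken modulo nr_pages, so a range that
 * runs past the end of the buffer (e.g, an @offset near the top with a
 * large @size) also syncs the pages at the start of the buffer.
 */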

/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *dev = sg_table->dev;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}
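
/*
 * E.g, a request for 8KB at offset 0x1800 returns only 0x800 bytes,
 * i.e, up to the next PAGE_SIZE boundary (assuming 4K pages); callers
 * are expected to iterate until the whole range is consumed.
 */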

#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}

/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				 i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				 i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				 i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif

/*
 * Populate the SG Table page table entries from the table/data
 * pages allocated. Each data page holds ETR_SG_PAGES_PER_SYSPAGE
 * SG pages, and so does each table page. So we keep track of indices
 * of the tables in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0; /* index to the current system table_page */
	int sgtidx = 0;	/* index to the sg_table within the current syspage */
	int sgtentry = 0; /* the entry within the sg_table */
	int dpidx = 0; /* index to the current system data_page */
	int spidx = 0; /* index to the SG page within the current data page */
	sgte_t *ptr; /* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}
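
/*
 * The resulting layout: each table page typically carries
 * (ETR_SG_PTRS_PER_PAGE - 1) NORMAL entries followed by one LINK
 * entry pointing at the next table page; the final table page ends
 * with a LAST entry instead of a LINK.
 */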

/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table and a data buffer
 * of @size, and populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual address
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}
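
/*
 * E.g, with a 4K PAGE_SIZE, a 1MB buffer needs 256 data pages;
 * tmc_etr_sg_table_entries(256) == 256 (no links needed), so
 * DIV_ROUND_UP(256, 1024) == 1 table page suffices.
 */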

/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
					     &flat_buf->daddr, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = drvdata->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}

static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr)
		dma_free_coherent(flat_buf->dev, flat_buf->size,
				  flat_buf->vaddr, flat_buf->daddr);
	kfree(flat_buf);
}

static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;
}
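
/*
 * E.g, for a flat buffer at hwaddr 0x80000000 that has not wrapped,
 * rrp == 0x80000400 and rwp == 0x80001000 yield offset == 0x400 and
 * len == 0xc00.
 */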

static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * the buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};

/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf and set up
 * the parameters appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;

	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}

static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}
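
/*
 * E.g, if the write pointer has wrapped past the read pointer in a
 * 1MB buffer and the FULL bit is not set, w_offset == 0x100 and
 * r_offset == 0x200 give len == 0x100000 + 0x100 - 0x200 == 0xfff00.
 */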

static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};

/*
 * The TMC ETR could be connected to a CATU device, which can provide an
 * address translation service. This is represented by the Output port
 * of the TMC (ETR) connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->nr_outport; i++) {
		tmp = etr->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}

	return NULL;
}

static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		helper_ops(catu)->enable(catu, drvdata->etr_buf);
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}

static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
						? &etr_catu_buf_ops : NULL,
};

static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}

/*
 * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use contiguous DMA memory if at least one of the following
	 * conditions is true:
	 *  a) The ETR cannot use Scatter-Gather.
	 *  b) We have a backing IOMMU.
	 *  c) The requested memory size is smaller than 1M.
	 *
	 * Otherwise, fall back to the available mechanisms.
	 */
	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}
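
/*
 * E.g, a 64KB request with no caller-supplied pages is first tried in
 * ETR_MODE_FLAT (size < 1M); an 8MB request on a system without an
 * IOMMU skips the flat attempt and is tried in ETR_MODE_ETR_SG, then
 * ETR_MODE_CATU, whichever the hardware supports.
 */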

static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}

/*
 * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to the end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}

static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}

/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);
	etr_buf->full = status & TMC_STS_FULL;

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (etr_buf->full)
		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}

static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			      struct etr_buf *etr_buf)
{
	u32 axictl, sts;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf || drvdata->etr_buf))
		return;
	drvdata->etr_buf = etr_buf;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * this on.
	 */
	tmc_etr_enable_catu(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG) {
		if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
			return;
		axictl |= TMC_AXICTL_SCT_GAT_MODE;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer (starting at
 * etr_buf->offset, limited by etr_buf->len) from @pos, with a maximum
 * limit of @len, also updating the @bufpp on where to find it. Since
 * the trace data can start anywhere in the buffer, depending on the
 * RRP, we adjust the @len returned to handle the buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}
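
/*
 * E.g, with etr_buf->offset == 0xff000 in a 1MB buffer, a read at
 * pos == 0x2000 starts at absolute offset 0x101000, which wraps
 * around to 0x1000 within the buffer.
 */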

static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}

static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);

	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);
	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from a disabled state, we need to make
	 * sure we have a buffer of the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	/* We don't support perf mode yet! */
	return -EINVAL;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be disabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If sysfs_buf is NULL the trace data has been read already */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled the buffer can't
		 * be NULL.
		 */
		tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}
1232