/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

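/*
 * Sizing note: each display list entry is 8 bytes, and the header above is
 * 4 + 8 * 8 + 4 + 4 = 76 bytes, rounded up to 80 bytes by the ALIGN(..., 8)
 * in vsp1_dl_list_alloc(). A body holding VSP1_DL_NUM_ENTRIES (256) entries
 * thus occupies 256 * 8 = 2048 bytes, and the optional header is stored
 * right after it, at offset 2048 from the start of the body.
 */
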
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: true if the display list is the head of a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

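/*
 * Display list lifecycle in headerless mode, with all transitions protected
 * by @lock:
 *
 *   free --- vsp1_dl_list_get() ---> owned by the caller
 *        --- vsp1_dl_list_commit() ---> queued, or pending if UPD is set
 *   pending --- frame end IRQ ---> queued
 *   queued --- frame end IRQ ---> active
 *   active --- display start IRQ ---> free
 *
 * In header mode the committed list becomes active immediately, as the
 * caller guarantees that the hardware is idle at commit time.
 */
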
/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will otherwise take ownership of them and free
 * them. Manual freeing typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}

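/*
 * Typical fragment usage, as an illustrative sketch (the register address
 * and value are placeholders, error handling is elided): a fragment is
 * pre-computed once and attached to a display list when the list is filled.
 *
 *	struct vsp1_dl_body *dlb;
 *
 *	dlb = vsp1_dl_fragment_alloc(vsp1, 1);
 *	vsp1_dl_fragment_write(dlb, reg, value);
 *	...
 *	vsp1_dl_list_add_fragment(dl, dlb);
 *
 * Once added, the fragment belongs to the display list and will be released
 * through the garbage collection mechanism below.
 */
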
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/* Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempting to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}

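/*
 * Illustrative chaining sketch: to process an image in two partitions with a
 * single frame end interrupt, a second display list is chained to the head
 * before the head is committed (both lists must come from the same
 * header-mode manager, and both are filled with vsp1_dl_list_write()):
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *
 *	vsp1_dl_list_add_chain(head, dl);
 *	vsp1_dl_list_commit(head);
 */
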
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the addresses and sizes of the display list
	 * bodies. The address of the first body was already filled in when
	 * the display list was allocated.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	/*
	 * If this display list is part of a chain and is not its last entry,
	 * instruct the hardware to automatically start the next display list
	 * in the chain when this one completes.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;

		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}

		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

	/* Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending, it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}

	/* Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}

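/*
 * A complete transaction, as an illustrative sketch (reg and value are
 * placeholders): get a free list, fill it, then commit it. The interrupt
 * handlers below recycle the list once the hardware is done with it, so the
 * caller doesn't put it back explicitly.
 *
 *	struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *
 *	if (!dl)
 *		return;
 *	vsp1_dl_list_write(dl, reg, value);
 *	...
 *	vsp1_dl_list_commit(dl);
 */
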
/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
	spin_lock(&dlm->lock);

	/* The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	spin_unlock(&dlm->lock);
}

void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	/* Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

	/* If the UPD bit is set, the commit operation raced with the interrupt
	 * and occurred after the frame end event and UPD clear but before
	 * interrupt processing. The hardware hasn't taken the update into
	 * account yet, we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/* The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}

	/* Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;

		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/* The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments waiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}

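/*
 * Illustrative setup sketch: a manager is typically created per WPF at probe
 * time and destroyed at remove time (the index and preallocation count are
 * placeholders):
 *
 *	dlm = vsp1_dlm_create(vsp1, 0, 4);
 *	if (!dlm)
 *		return -ENOMEM;
 *	...
 *	vsp1_dlm_destroy(dlm);
 */
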
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}