/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/delay.h>

/* Register access subroutines */

static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}

static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}

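/*
 * Masked write helpers: only the bits set in @mask are taken from @val,
 * the remaining bits keep the register's current contents.
 */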
static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = vp_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.vp_regs + reg_id);
}

static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 old = mxr_read(mdev, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, mdev->res.mxr_regs + reg_id);
}

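/*
 * Gate register updates to vsync. Callers disable updates around a batch
 * of register writes and re-enable them afterwards; judging by the register
 * names (MXR_STATUS_SYNC_ENABLE, VP_SHADOW_UPDATE), the hardware then
 * latches the shadowed settings on the next vsync.
 */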
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
	/* block update on vsync */
	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
		MXR_STATUS_SYNC_ENABLE);
	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}

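/*
 * Soft-reset the Video Processor: trigger VP_SRESET_PROCESSING and poll
 * for up to 100 * 10 ms for the hardware to clear the bit again.
 */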
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
	int tries;

	vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* waiting until VP_SRESET_PROCESSING is 0 */
		if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		mdelay(10);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev);

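/*
 * Bring the mixer and the Video Processor into a known default state:
 * RGB888 output, 16-beat DMA bursts, default layer priorities, gray
 * background, default graphic layer blending, VP soft-reset with default
 * scaler filters, and all interrupts enabled.
 */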
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in RGB888 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* set default layer priority: layer1 > video > layer0,
	 * because the typical usage scenario is:
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val  = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* setting graphical layers */

	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

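/*
 * Program the pixel format and geometry (span, size, scaling ratios,
 * source and destination offsets) of graphic layer @idx.
 */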
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* setup format */
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
	val  = MXR_GRP_WH_WIDTH(geo->src.width);
	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

	/* setup offsets in source image */
	val  = MXR_GRP_SXY_SX(geo->src.x_offset);
	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

	/* setup offsets in display image */
	val  = MXR_GRP_DXY_DX(geo->dst.x_offset);
	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

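/*
 * Program the pixel format and geometry of the Video Processor layer;
 * in interlaced mode each field covers half of the destination height.
 */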
void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height));
	/* chroma height has to be halved to avoid chroma distortions */
	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height / 2));

	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
	vp_write(mdev, VP_SRC_H_POSITION,
		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
	} else {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
	}

	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
	vp_write(mdev, VP_V_RATIO, geo->y_ratio);

	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

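/*
 * Set the DMA base address of graphic layer @idx. A zero address disables
 * the layer in MXR_CFG, a non-zero one enables it.
 */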
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 val = addr ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	if (idx == 0)
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
	else
		mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
	mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

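/*
 * Set luma/chroma DMA addresses for the top ([0]) and bottom ([1]) fields
 * of the video layer; a zero luma address disables both the layer and the
 * Video Processor.
 */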
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

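/*
 * Rotate the layer's buffers on vsync: the buffer the hardware has just
 * finished scanning out (shadow_buf) is returned to vb2 as DONE, the one
 * last programmed (update_buf) becomes the new shadow, and the next queued
 * buffer, if any, is programmed into the hardware.
 */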
static void mxr_irq_layer_handle(struct mxr_layer *layer)
{
	struct list_head *head;
	struct mxr_buffer *done;

	/* skip non-existing layer */
	if (layer == NULL)
		return;

	head = &layer->enq_list;

	spin_lock(&layer->enq_slock);
	if (layer->state == MXR_LAYER_IDLE)
		goto done;

	done = layer->shadow_buf;
	layer->shadow_buf = layer->update_buf;

	if (list_empty(head)) {
		if (layer->state != MXR_LAYER_STREAMING)
			layer->update_buf = NULL;
	} else {
		struct mxr_buffer *next;
		next = list_first_entry(head, struct mxr_buffer, list);
		list_del(&next->list);
		layer->update_buf = next;
	}

	layer->ops.buffer_set(layer, layer->update_buf);

	if (done && done != layer->shadow_buf)
		vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);

done:
	spin_unlock(&layer->enq_slock);
}

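/*
 * Main mixer interrupt handler. On vsync it wakes waiters, toggles the
 * top/bottom field flag in interlaced mode, acknowledges the interrupt
 * (which uses a separate clear bit) and rotates the layers' buffers;
 * other interrupt sources are only acknowledged.
 */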
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up processes waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		/* toggle TOP field event if working in interlaced mode */
		if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
			change_bit(MXR_EVENT_TOP, &mdev->event_flags);
		wake_up(&mdev->event_queue);
		/* the vsync interrupt uses different bits for read and clear */
		val &= ~MXR_INT_STATUS_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}

	/* clear interrupts */
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave early on a non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	/* skip layer update on bottom field */
	if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}

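/* Route the mixer output: cookie 0 selects the SDO path, anything else HDMI. */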
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
	u32 val;

	val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
}

void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* start MIXER */
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
	set_bit(MXR_EVENT_TOP, &mdev->event_flags);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* stop MIXER */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

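/*
 * Sleep until the interrupt handler reports a vsync, or return -ETIME
 * after a 1 s timeout.
 */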
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
	long time_left;

	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
	/* TODO: consider adding interruptible */
	time_left = wait_event_timeout(mdev->event_queue,
		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
		msecs_to_jiffies(1000));
	if (time_left > 0)
		return 0;
	mxr_warn(mdev, "no vsync detected - timeout\n");
	return -ETIME;
}

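/*
 * Derive the mixer output configuration (colorspace, scan mode, SD/HD
 * resolution class) from the media-bus frame format of the connected
 * output.
 */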
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* selecting colorspace accepted by output */
	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
		val |= MXR_CFG_OUT_YUV444;
	else
		val |= MXR_CFG_OUT_RGB888;

	/* choosing between interlaced and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing the proper HD or SD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
		MXR_CFG_OUT_MASK);

	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra actions need to be done */
}

void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra actions need to be done */
}

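/*
 * What appear to be default coefficient tables for the Video Processor's
 * polyphase scaling filters, loaded into the VP_POLY8/VP_POLY4 register
 * banks by mxr_reg_vp_default_filter() below.
 */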
static const u8 filter_y_horiz_tap8[] = {
	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
	0,	2,	4,	5,	6,	6,	6,	6,
	6,	5,	5,	4,	3,	2,	1,	1,
	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
	127,	126,	125,	121,	114,	107,	99,	89,
	79,	68,	57,	46,	35,	25,	16,	8,
};

static const u8 filter_y_vert_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
	0,	5,	11,	19,	27,	37,	48,	59,
	70,	81,	92,	102,	111,	118,	124,	126,
	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
};

static const u8 filter_cr_horiz_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
};

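/*
 * Load a coefficient table into consecutive VP filter registers, packing
 * four bytes big-endian into each 32-bit word.
 */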
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
	int reg_id, const u8 *data, unsigned int size)
{
	/* assure 4-byte alignment */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) | (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_write(mdev, reg_id, val);
	}
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}

static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);
#undef DUMPREG
}

void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}