• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1diff -Naur a/drivers/clk/meson/clk-mux.c b/drivers/clk/meson/clk-mux.c
2--- a/drivers/clk/meson/clk-mux.c	1970-01-01 08:00:00.000000000 +0800
3+++ b/drivers/clk/meson/clk-mux.c	2023-02-23 17:02:04.947750903 +0800
4@@ -0,0 +1,214 @@
5+/*
6+ * drivers/amlogic/clk/clk-mux.c
7+ *
8+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
9+ *
10+ * This program is free software; you can redistribute it and/or modify
11+ * it under the terms of the GNU General Public License as published by
12+ * the Free Software Foundation; either version 2 of the License, or
13+ * (at your option) any later version.
14+ *
15+ * This program is distributed in the hope that it will be useful, but WITHOUT
16+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18+ * more details.
19+ *
20+ */
21+
22+#include <linux/clk.h>
23+#include <linux/clk-provider.h>
24+#include <linux/delay.h>
25+#include <linux/err.h>
26+#include <linux/io.h>
27+#include <linux/module.h>
28+#include <linux/of_address.h>
29+#include <linux/slab.h>
30+#include <linux/string.h>
31+
32+#include "clk-mux.h"
33+
34+
35+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
36+
37+static inline u32 clk_mux_readl(struct clk_mux *mux)
38+{
39+	if (mux->flags & CLK_MUX_BIG_ENDIAN)
40+		return ioread32be(mux->reg);
41+
42+	return readl(mux->reg);
43+}
44+
45+static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
46+{
47+	if (mux->flags & CLK_MUX_BIG_ENDIAN)
48+		iowrite32be(val, mux->reg);
49+	else
50+		writel(val, mux->reg);
51+}
52+
53+
54+static u8 meson_clk_mux_get_parent(struct clk_hw *hw)
55+{
56+	struct clk_mux *mux = to_clk_mux(hw);
57+	int num_parents = clk_hw_get_num_parents(hw);
58+	u32 val;
59+
60+	/*
61+	 * FIXME need a mux-specific flag to determine if val is bitwise or
62+	 * numeric. e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges
63+	 * from 0x1 to 0x7 (index starts at one)
64+	 * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
65+	 * val = 0x4 really means "bit 2, index starts at bit 0"
66+	 */
67+	val = clk_mux_readl(mux) >> mux->shift;
68+	val &= mux->mask;
69+
70+	if (mux->table) {
71+		int i;
72+
73+		for (i = 0; i < num_parents; i++)
74+			if (mux->table[i] == val)
75+				return i;
76+		return -EINVAL;
77+	}
78+
79+	if (val && (mux->flags & CLK_MUX_INDEX_BIT))
80+		val = ffs(val) - 1;
81+
82+	if (val && (mux->flags & CLK_MUX_INDEX_ONE))
83+		val--;
84+
85+	if (val >= num_parents)
86+		return -EINVAL;
87+
88+	return val;
89+}
90+
91+static int meson_clk_mux_set_parent(struct clk_hw *hw, u8 index)
92+{
93+	struct clk_mux *mux = to_clk_mux(hw);
94+	u32 val;
95+	unsigned long flags = 0;
96+
97+	if (mux->table) {
98+		index = mux->table[index];
99+	} else {
100+		if (mux->flags & CLK_MUX_INDEX_BIT)
101+			index = 1 << index;
102+
103+		if (mux->flags & CLK_MUX_INDEX_ONE)
104+			index++;
105+	}
106+
107+	if (mux->lock)
108+		spin_lock_irqsave(mux->lock, flags);
109+	else
110+		__acquire(mux->lock);
111+
112+	if (mux->flags & CLK_MUX_HIWORD_MASK) {
113+		val = mux->mask << (mux->shift + 16);
114+	} else {
115+		val = clk_mux_readl(mux);
116+		val &= ~(mux->mask << mux->shift);
117+	}
118+
119+	val |= index << mux->shift;
120+	clk_mux_writel(mux, val);
121+
122+	if (mux->lock)
123+		spin_unlock_irqrestore(mux->lock, flags);
124+	else
125+		__release(mux->lock);
126+
127+	return 0;
128+}
129+
130+static unsigned long meson_clk_mux_recalc_rate(struct clk_hw *hw,
131+					       unsigned long parent_rate)
132+{
133+	struct clk_hw *parent_hw;
134+	u32 index = 0;
135+	unsigned long new_parent_rate;
136+
137+	index = meson_clk_mux_get_parent(hw);
138+
139+	parent_hw = clk_hw_get_parent_by_index(hw, index);
140+	new_parent_rate = clk_hw_get_rate(parent_hw);
141+	if (new_parent_rate != parent_rate)
142+		clk_set_parent(hw->clk, parent_hw->clk);
143+
144+	return new_parent_rate;
145+}
146+
147+int meson_clk_mux_determine_rate(struct clk_hw *hw,
148+			     struct clk_rate_request *req)
149+{
150+	struct clk_hw *parent, *best_parent = NULL;
151+	int i, num_parents, ret;
152+	unsigned long best = 0;
153+	struct clk_rate_request parent_req = *req;
154+	struct clk_mux *mux = to_clk_mux(hw);
155+
156+	num_parents = clk_hw_get_num_parents(hw);
157+
158+	if (num_parents == 2 && (mux->flags & CLK_PARENT_ALTERNATE)) {
159+		i = meson_clk_mux_get_parent(hw);
160+		i = (i + 1) % 2;
161+
162+		best_parent = clk_hw_get_parent_by_index(hw, i);
163+		best = clk_hw_get_rate(best_parent);
164+		if (best != parent_req.rate) {
165+			ret = clk_set_rate(best_parent->clk, parent_req.rate);
166+			if (ret)
167+				pr_err("Fail! Can not set to %lu, cur rate: %lu\n",
168+				   parent_req.rate, best);
169+			else {
170+				best = clk_hw_get_rate(best_parent);
171+				pr_debug("success set parent %s rate to %lu\n",
172+					clk_hw_get_name(best_parent), best);
173+				if (!(clk_hw_get_flags(hw) &
174+						CLK_SET_RATE_UNGATE)) {
175+					clk_prepare(best_parent->clk);
176+					clk_enable(best_parent->clk);
177+				}
178+			}
179+		}
180+	} else {
181+		for (i = 0; i < num_parents; i++) {
182+			parent = clk_hw_get_parent_by_index(hw, i);
183+			if (!parent)
184+				continue;
185+
186+			if (mux->flags & CLK_SET_RATE_PARENT) {
187+				parent_req = *req;
188+				ret = __clk_determine_rate(parent, &parent_req);
189+				if (ret)
190+					continue;
191+			} else {
192+				parent_req.rate = clk_hw_get_rate(parent);
193+			}
194+		}
195+	}
196+
197+	if (!best_parent)
198+		return -EINVAL;
199+
200+	/* best_parent is guaranteed non-NULL here (checked above) */
201+	req->best_parent_hw = best_parent;
202+
203+	req->best_parent_rate = best;
204+	req->rate = best;
205+
206+	return 0;
207+}
208+
209+const struct clk_ops meson_clk_mux_ops = {
210+	.get_parent = meson_clk_mux_get_parent,
211+	.set_parent = meson_clk_mux_set_parent,
212+	.determine_rate = meson_clk_mux_determine_rate,
213+	.recalc_rate = meson_clk_mux_recalc_rate,
214+};
215+
216+const struct clk_ops meson_clk_mux_ro_ops = {
217+	.get_parent = meson_clk_mux_get_parent,
218+};
219diff -Naur a/drivers/clk/meson/clk-mux.h b/drivers/clk/meson/clk-mux.h
220--- a/drivers/clk/meson/clk-mux.h	1970-01-01 08:00:00.000000000 +0800
221+++ b/drivers/clk/meson/clk-mux.h	2023-02-23 17:02:04.947750903 +0800
222@@ -0,0 +1,19 @@
223+/* SPDX-License-Identifier: GPL-2.0 */
224+/*
225+ * Copyright (c) 2019 BayLibre, SAS.
226+ * Author: Jerome Brunet <jbrunet@baylibre.com>
227+ */
228+
229+#ifndef __MESON_CLK_MUX_H
230+#define __MESON_CLK_MUX_H
231+
232+#include <linux/clk-provider.h>
233+#include <linux/regmap.h>
234+#include "parm.h"
235+
236+#define CLK_PARENT_ALTERNATE BIT(6) /* BIT(5) is CLK_MUX_BIG_ENDIAN in clk-provider.h */
237+
238+extern const struct clk_ops meson_clk_mux_ops;
239+
240+extern const struct clk_ops meson_clk_mux_ro_ops;
241+#endif /* __MESON_CLK_MUX_H */
242diff -Naur a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
243--- a/drivers/clk/meson/clk-pll.c	2022-12-19 17:13:11.809508587 +0800
244+++ b/drivers/clk/meson/clk-pll.c	2023-02-23 17:02:04.947750903 +0800
245@@ -283,6 +283,8 @@
246 		delay--;
247 	} while (delay > 0);
248
249+	pr_warn("%s: pll %s did not lock\n", __func__, clk_hw_get_name(hw));
250+
251 	return -ETIMEDOUT;
252 }
253
254@@ -339,6 +341,9 @@
255 	/* Enable the pll */
256 	meson_parm_write(clk->map, &pll->en, 1);
257
258+	/* Reset delay */
259+	udelay(pll->rst_delay_us);
260+
261 	/* Take the pll out reset */
262 	meson_parm_write(clk->map, &pll->rst, 0);
263
264diff -Naur a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
265--- a/drivers/clk/meson/clk-pll.h	2022-12-19 17:13:11.809508587 +0800
266+++ b/drivers/clk/meson/clk-pll.h	2023-02-23 17:02:04.947750903 +0800
267@@ -41,6 +41,7 @@
268 	const struct pll_params_table *table;
269 	const struct pll_mult_range *range;
270 	u8 flags;
271+	u32 rst_delay_us;
272 };
273
274 extern const struct clk_ops meson_clk_pll_ro_ops;
275diff -Naur a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
276--- a/drivers/clk/meson/g12a.c	2022-12-19 17:13:11.813508635 +0800
277+++ b/drivers/clk/meson/g12a.c	2023-02-23 17:02:04.951750958 +0800
278@@ -15,6 +15,7 @@
279 #include <linux/of_device.h>
280 #include <linux/platform_device.h>
281 #include <linux/clk.h>
282+#include <linux/of_address.h>
283
284 #include "clk-mpll.h"
285 #include "clk-pll.h"
286@@ -23,8 +24,9 @@
287 #include "vid-pll-div.h"
288 #include "meson-eeclk.h"
289 #include "g12a.h"
290+#include "g12a-vcodec-clk.h"
291
292-static DEFINE_SPINLOCK(meson_clk_lock);
293+DEFINE_SPINLOCK(meson_clk_lock);
294
295 static struct clk_regmap g12a_fixed_pll_dco = {
296 	.data = &(struct meson_clk_pll_data){
297@@ -1602,20 +1604,21 @@
298 };
299
300 static const struct pll_mult_range g12a_gp0_pll_mult_range = {
301-	.min = 125,
302-	.max = 255,
303+	.min = 132, /* Force M to 132 */
304+	.max = 132,
305 };
306
307 /*
308  * Internal gp0 pll emulation configuration parameters
309  */
310 static const struct reg_sequence g12a_gp0_init_regs[] = {
311+	{ .reg = HHI_GP0_PLL_CNTL0,	.def = 0x08000000 },
312 	{ .reg = HHI_GP0_PLL_CNTL1,	.def = 0x00000000 },
313 	{ .reg = HHI_GP0_PLL_CNTL2,	.def = 0x00000000 },
314-	{ .reg = HHI_GP0_PLL_CNTL3,	.def = 0x48681c00 },
315-	{ .reg = HHI_GP0_PLL_CNTL4,	.def = 0x33771290 },
316+	{ .reg = HHI_GP0_PLL_CNTL3,	.def = 0x6a285c00},
317+	{ .reg = HHI_GP0_PLL_CNTL4,	.def = 0x65771290},
318 	{ .reg = HHI_GP0_PLL_CNTL5,	.def = 0x39272000 },
319-	{ .reg = HHI_GP0_PLL_CNTL6,	.def = 0x56540000 },
320+	{ .reg = HHI_GP0_PLL_CNTL6,	.def = 0x56540000, .delay_us = 10 },
321 };
322
323 static struct clk_regmap g12a_gp0_pll_dco = {
324@@ -1653,6 +1656,7 @@
325 		.range = &g12a_gp0_pll_mult_range,
326 		.init_regs = g12a_gp0_init_regs,
327 		.init_count = ARRAY_SIZE(g12a_gp0_init_regs),
328+		.rst_delay_us = 100,
329 	},
330 	.hw.init = &(struct clk_init_data){
331 		.name = "gp0_pll_dco",
332@@ -3657,6 +3661,86 @@
333 	},
334 };
335
336+/* MIPI DSI Host Clocks */
337+
338+static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
339+	&g12a_vid_pll.hw,
340+	&g12a_gp0_pll.hw,
341+	&g12a_hifi_pll.hw,
342+	&g12a_mpll1.hw,
343+	&g12a_fclk_div2.hw,
344+	&g12a_fclk_div2p5.hw,
345+	&g12a_fclk_div3.hw,
346+	&g12a_fclk_div7.hw,
347+};
348+
349+static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
350+	.data = &(struct clk_regmap_mux_data){
351+		.offset = HHI_MIPIDSI_PHY_CLK_CNTL,
352+		.mask = 0x7,
353+		.shift = 12,
354+		.flags = CLK_MUX_ROUND_CLOSEST,
355+	},
356+	.hw.init = &(struct clk_init_data){
357+		.name = "mipi_dsi_pxclk_sel",
358+		.ops = &clk_regmap_mux_ops,
359+		.parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
360+		.num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
361+		.flags = CLK_SET_RATE_NO_REPARENT,
362+	},
363+};
364+
365+static struct clk_regmap g12a_mipi_dsi_pxclk_div = {
366+	.data = &(struct clk_regmap_div_data){
367+		.offset = HHI_MIPIDSI_PHY_CLK_CNTL,
368+		.shift = 0,
369+		.width = 7,
370+	},
371+	.hw.init = &(struct clk_init_data){
372+		.name = "mipi_dsi_pxclk_div",
373+		.ops = &clk_regmap_divider_ops,
374+		.parent_hws = (const struct clk_hw *[]) {
375+			&g12a_mipi_dsi_pxclk_sel.hw
376+		},
377+		.num_parents = 1,
378+		.flags = CLK_SET_RATE_PARENT,
379+	},
380+};
381+
382+static struct clk_regmap g12a_mipi_dsi_pxclk = {
383+	.data = &(struct clk_regmap_gate_data){
384+		.offset = HHI_MIPIDSI_PHY_CLK_CNTL,
385+		.bit_idx = 8,
386+	},
387+	.hw.init = &(struct clk_init_data) {
388+		.name = "mipi_dsi_pxclk",
389+		.ops = &clk_regmap_gate_ops,
390+		.parent_hws = (const struct clk_hw *[]) {
391+			&g12a_mipi_dsi_pxclk_div.hw
392+		},
393+		.num_parents = 1,
394+		.flags = CLK_SET_RATE_PARENT,
395+	},
396+};
397+
398+/* GPIO 24M */
399+static struct clk_regmap g12a_24m = {
400+	.data = &(struct clk_regmap_gate_data){
401+		.offset = HHI_XTAL_DIVN_CNTL,
402+		.bit_idx = 6,
403+	},
404+	.hw.init = &(struct clk_init_data){
405+		.name = "g12a_24m",
406+		.ops = &clk_regmap_gate_ops,
407+		/* parent supplied via parent_data (.fw_name) below */
408+		.parent_data = &(const struct clk_parent_data) {
409+			.fw_name = "xtal",
410+		},
411+		.num_parents = 1,
412+		.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
413+	},
414+};
415+
416 /* HDMI Clocks */
417
418 static const struct clk_parent_data g12a_hdmi_parent_data[] = {
419@@ -4099,6 +4183,96 @@
420 	},
421 };
422
423+static const char * const media_parent_names[] = { "xtal",
424+	"gp0_pll", "hifi_pll", "fclk_div2p5", "fclk_div3", "fclk_div4",
425+	"fclk_div5",  "fclk_div7"};
426+
427+static const char * const media_parent_names_mipi[] = { "xtal",
428+	"gp0_pll", "mpll1", "mpll2", "fclk_div3", "fclk_div4",
429+	"fclk_div5",  "fclk_div7"};
430+
431+static struct clk_mux cts_mipi_isp_clk_mux = {
432+	.reg = (void *)HHI_MIPI_ISP_CLK_CNTL,
433+	.mask = 0x7,
434+	.shift = 9,
435+	.lock = &meson_clk_lock,
436+	.hw.init = &(struct clk_init_data){
437+		.name = "cts_mipi_isp_clk_mux",
438+		.ops = &clk_mux_ops,
439+		.parent_names = media_parent_names,
440+		.num_parents = 8,
441+		.flags = CLK_GET_RATE_NOCACHE,
442+	},
443+};
444+
445+static struct clk_divider cts_mipi_isp_clk_div = {
446+	.reg = (void *)HHI_MIPI_ISP_CLK_CNTL,
447+	.shift = 0,
448+	.width = 7,
449+	.lock = &meson_clk_lock,
450+	.hw.init = &(struct clk_init_data){
451+		.name = "cts_mipi_isp_clk_div",
452+		.ops = &clk_divider_ops,
453+		.parent_names = (const char *[]){ "cts_mipi_isp_clk_mux" },
454+		.num_parents = 1,
455+		.flags = CLK_GET_RATE_NOCACHE,
456+	},
457+};
458+
459+static struct clk_gate cts_mipi_isp_clk_gate = {
460+	.reg = (void *)HHI_MIPI_ISP_CLK_CNTL,
461+	.bit_idx = 8,
462+	.lock = &meson_clk_lock,
463+	.hw.init = &(struct clk_init_data) {
464+		.name = "cts_mipi_isp_clk_gate",
465+		.ops = &clk_gate_ops,
466+		.parent_names = (const char *[]){ "cts_mipi_isp_clk_div" },
467+		.num_parents = 1,
468+		.flags = CLK_GET_RATE_NOCACHE,
469+	},
470+};
471+
472+static struct clk_mux cts_mipi_csi_phy_clk0_mux = {
473+	.reg = (void *)HHI_MIPI_CSI_PHY_CLK_CNTL,
474+	.mask = 0x7,
475+	.shift = 9,
476+	.lock = &meson_clk_lock,
477+	.hw.init = &(struct clk_init_data){
478+		.name = "cts_mipi_csi_phy_clk0_mux",
479+		.ops = &clk_mux_ops,
480+		.parent_names = media_parent_names_mipi,
481+		.num_parents = 8,
482+		.flags = CLK_GET_RATE_NOCACHE,
483+	},
484+};
485+
486+static struct clk_divider cts_mipi_csi_phy_clk0_div = {
487+	.reg = (void *)HHI_MIPI_CSI_PHY_CLK_CNTL,
488+	.shift = 0,
489+	.width = 7,
490+	.lock = &meson_clk_lock,
491+	.hw.init = &(struct clk_init_data){
492+		.name = "cts_mipi_csi_phy_clk0_div",
493+		.ops = &clk_divider_ops,
494+		.parent_names = (const char *[]){ "cts_mipi_csi_phy_clk0_mux" },
495+		.num_parents = 1,
496+		.flags = CLK_GET_RATE_NOCACHE,
497+	},
498+};
499+
500+static struct clk_gate cts_mipi_csi_phy_clk0_gate = {
501+	.reg = (void *)HHI_MIPI_CSI_PHY_CLK_CNTL,
502+	.bit_idx = 8,
503+	.lock = &meson_clk_lock,
504+	.hw.init = &(struct clk_init_data) {
505+		.name = "cts_mipi_csi_phy_clk0_gate",
506+		.ops = &clk_gate_ops,
507+		.parent_names = (const char *[]){ "cts_mipi_csi_phy_clk0_div" },
508+		.num_parents = 1,
509+		.flags = CLK_GET_RATE_NOCACHE,
510+	},
511+};
512+
513 #define MESON_GATE(_name, _reg, _bit) \
514 	MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
515
516@@ -4402,6 +4576,9 @@
517 		[CLKID_SPICC1_SCLK_SEL]		= &g12a_spicc1_sclk_sel.hw,
518 		[CLKID_SPICC1_SCLK_DIV]		= &g12a_spicc1_sclk_div.hw,
519 		[CLKID_SPICC1_SCLK]		= &g12a_spicc1_sclk.hw,
520+		[CLKID_MIPI_DSI_PXCLK_SEL]	= &g12a_mipi_dsi_pxclk_sel.hw,
521+		[CLKID_MIPI_DSI_PXCLK_DIV]	= &g12a_mipi_dsi_pxclk_div.hw,
522+		[CLKID_MIPI_DSI_PXCLK]		= &g12a_mipi_dsi_pxclk.hw,
523 		[NR_CLKS]			= NULL,
524 	},
525 	.num = NR_CLKS,
526@@ -4657,6 +4834,16 @@
527 		[CLKID_SPICC1_SCLK_SEL]		= &g12a_spicc1_sclk_sel.hw,
528 		[CLKID_SPICC1_SCLK_DIV]		= &g12a_spicc1_sclk_div.hw,
529 		[CLKID_SPICC1_SCLK]		= &g12a_spicc1_sclk.hw,
530+		[CLKID_NNA_AXI_CLK_SEL]		= &sm1_nna_axi_clk_sel.hw,
531+		[CLKID_NNA_AXI_CLK_DIV]		= &sm1_nna_axi_clk_div.hw,
532+		[CLKID_NNA_AXI_CLK]		= &sm1_nna_axi_clk.hw,
533+		[CLKID_NNA_CORE_CLK_SEL]	= &sm1_nna_core_clk_sel.hw,
534+		[CLKID_NNA_CORE_CLK_DIV]	= &sm1_nna_core_clk_div.hw,
535+		[CLKID_NNA_CORE_CLK]		= &sm1_nna_core_clk.hw,
536+		[CLKID_MIPI_DSI_PXCLK_SEL]	= &g12a_mipi_dsi_pxclk_sel.hw,
537+		[CLKID_MIPI_DSI_PXCLK_DIV]	= &g12a_mipi_dsi_pxclk_div.hw,
538+		[CLKID_MIPI_DSI_PXCLK]		= &g12a_mipi_dsi_pxclk.hw,
539+		[CLKID_24M]			= &g12a_24m.hw,
540 		[NR_CLKS]			= NULL,
541 	},
542 	.num = NR_CLKS,
543@@ -4903,6 +5090,9 @@
544 		[CLKID_NNA_CORE_CLK_SEL]	= &sm1_nna_core_clk_sel.hw,
545 		[CLKID_NNA_CORE_CLK_DIV]	= &sm1_nna_core_clk_div.hw,
546 		[CLKID_NNA_CORE_CLK]		= &sm1_nna_core_clk.hw,
547+		[CLKID_MIPI_DSI_PXCLK_SEL]	= &g12a_mipi_dsi_pxclk_sel.hw,
548+		[CLKID_MIPI_DSI_PXCLK_DIV]	= &g12a_mipi_dsi_pxclk_div.hw,
549+		[CLKID_MIPI_DSI_PXCLK]		= &g12a_mipi_dsi_pxclk.hw,
550 		[NR_CLKS]			= NULL,
551 	},
552 	.num = NR_CLKS,
553@@ -5150,6 +5340,10 @@
554 	&sm1_nna_core_clk_sel,
555 	&sm1_nna_core_clk_div,
556 	&sm1_nna_core_clk,
557+	&g12a_mipi_dsi_pxclk_sel,
558+	&g12a_mipi_dsi_pxclk_div,
559+	&g12a_mipi_dsi_pxclk,
560+	&g12a_24m,
561 };
562
563 static const struct reg_sequence g12a_init_regs[] = {
564@@ -5166,6 +5360,57 @@
565
566 	xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
567
568+	void __iomem *clk_base;
569+
570+	clk_base = of_iomap(of_get_parent(pdev->dev.of_node), 0);
571+	if (!clk_base) {
572+		pr_err("%s: Unable to map clk base\n", __func__);
573+		return -ENOMEM;
574+	}
575+
576+	cts_mipi_isp_clk_mux.reg = clk_base
577+		+ (unsigned long)(cts_mipi_isp_clk_mux.reg);
578+	cts_mipi_isp_clk_gate.reg = clk_base
579+		+ (unsigned long)(cts_mipi_isp_clk_gate.reg);
580+	cts_mipi_isp_clk_div.reg = clk_base
581+		+ (unsigned long)(cts_mipi_isp_clk_div.reg);
582+
583+	cts_mipi_csi_phy_clk0_mux.reg = clk_base
584+		+ (unsigned long)(cts_mipi_csi_phy_clk0_mux.reg);
585+	cts_mipi_csi_phy_clk0_div.reg = clk_base
586+		+ (unsigned long)(cts_mipi_csi_phy_clk0_div.reg);
587+	cts_mipi_csi_phy_clk0_gate.reg = clk_base
588+		+ (unsigned long)(cts_mipi_csi_phy_clk0_gate.reg);
589+
590+	// register composite clks
591+	hws[CLKID_MIPI_ISP_CLK_COMP] = clk_hw_register_composite(NULL,
592+		"cts_mipi_isp_clk_composite",
593+		media_parent_names, 8,
594+		&cts_mipi_isp_clk_mux.hw,
595+		&clk_mux_ops,
596+		&cts_mipi_isp_clk_div.hw,
597+		&clk_divider_ops,
598+		&cts_mipi_isp_clk_gate.hw,
599+		&clk_gate_ops, 0);
600+	if (IS_ERR(hws[CLKID_MIPI_ISP_CLK_COMP]))
601+		panic("%s: %d register cts_mipi_isp_clk_composite error\n",
602+			__func__, __LINE__);
603+
604+	hws[CLKID_MIPI_CSI_PHY_CLK0_COMP] = clk_hw_register_composite(NULL,
605+		"cts_mipi_csi_phy_clk0_composite",
606+		media_parent_names_mipi, 8,
607+		&cts_mipi_csi_phy_clk0_mux.hw,
608+		&clk_mux_ops,
609+		&cts_mipi_csi_phy_clk0_div.hw,
610+		&clk_divider_ops,
611+		&cts_mipi_csi_phy_clk0_gate.hw,
612+		&clk_gate_ops, 0);
613+	if (IS_ERR(hws[CLKID_MIPI_CSI_PHY_CLK0_COMP]))
614+		panic("%s: %d register cts_mipi_csi_phy_clk0_composite error\n",
615+			__func__, __LINE__);
616+
617+	meson_g12a_vcodec_init(hws, clk_base);
618+
619 	/* Setup clock notifier for cpu_clk_postmux0 */
620 	g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
621 	notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_postmux0.hw);
622diff -Naur a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
623--- a/drivers/clk/meson/g12a.h	2022-12-19 17:13:11.813508635 +0800
624+++ b/drivers/clk/meson/g12a.h	2023-02-23 17:02:04.951750958 +0800
625@@ -44,6 +44,7 @@
626 #define HHI_PCIE_PLL_CNTL4		0x0A8
627 #define HHI_PCIE_PLL_CNTL5		0x0AC
628 #define HHI_PCIE_PLL_STS		0x0B8
629+#define HHI_XTAL_DIVN_CNTL		0x0BC /* 0x2f offset in data sheet */
630 #define HHI_HIFI_PLL_CNTL0		0x0D8
631 #define HHI_HIFI_PLL_CNTL1		0x0DC
632 #define HHI_HIFI_PLL_CNTL2		0x0E0
633@@ -70,6 +71,7 @@
634 #define HHI_MALI_CLK_CNTL		0x1b0
635 #define HHI_VPU_CLKC_CNTL		0x1b4
636 #define HHI_VPU_CLK_CNTL		0x1bC
637+#define HHI_MIPI_ISP_CLK_CNTL		0x1C0 /* 0x70 offset in data sheet */
638 #define HHI_NNA_CLK_CNTL		0x1C8
639 #define HHI_HDMI_CLK_CNTL		0x1CC
640 #define HHI_VDEC_CLK_CNTL		0x1E0
641@@ -117,6 +119,7 @@
642 #define HHI_HDMI_PLL_CNTL4		0x330
643 #define HHI_HDMI_PLL_CNTL5		0x334
644 #define HHI_HDMI_PLL_CNTL6		0x338
645+#define HHI_MIPI_CSI_PHY_CLK_CNTL	0x340 /* 0xd0 offset in data sheet */
646 #define HHI_SPICC_CLK_CNTL		0x3dc
647 #define HHI_SYS1_PLL_CNTL0		0x380
648 #define HHI_SYS1_PLL_CNTL1		0x384
649@@ -264,8 +267,9 @@
650 #define CLKID_NNA_AXI_CLK_DIV			263
651 #define CLKID_NNA_CORE_CLK_SEL			265
652 #define CLKID_NNA_CORE_CLK_DIV			266
653+#define CLKID_MIPI_DSI_PXCLK_DIV		268
654
655-#define NR_CLKS					268
656+#define NR_CLKS					288
657
658 /* include the CLKIDs that have been made part of the DT binding */
659 #include <dt-bindings/clock/g12a-clkc.h>
660diff -Naur a/drivers/clk/meson/g12a-vcodec-clk.c b/drivers/clk/meson/g12a-vcodec-clk.c
661--- a/drivers/clk/meson/g12a-vcodec-clk.c	1970-01-01 08:00:00.000000000 +0800
662+++ b/drivers/clk/meson/g12a-vcodec-clk.c	2023-02-23 17:02:04.951750958 +0800
663@@ -0,0 +1,731 @@
664+/*
665+ * drivers/amlogic/clk/g12a/g12a_clk_media.c
666+ *
667+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
668+ *
669+ * This program is free software; you can redistribute it and/or modify
670+ * it under the terms of the GNU General Public License as published by
671+ * the Free Software Foundation; either version 2 of the License, or
672+ * (at your option) any later version.
673+ *
674+ * This program is distributed in the hope that it will be useful, but WITHOUT
675+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
676+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
677+ * more details.
678+ *
679+ */
680+
681+#include <linux/clk.h>
682+#include <linux/clk-provider.h>
683+#include <linux/of_address.h>
684+#include <linux/platform_device.h>
685+#include <linux/init.h>
686+#include <linux/slab.h>
687+
688+#include "g12a.h"
689+#include "clk-mux.h"
690+#include "g12a-vcodec-clk.h"
691+
692+
693+extern spinlock_t meson_clk_lock;
694+
695+
696+const char *g12a_dec_parent_names[] = { "fclk_div2p5", "fclk_div3",
697+	"fclk_div4", "fclk_div5", "fclk_div7", "hifi_pll", "gp0_pll", "xtal"};
698+
699+/* cts_vdec_clk */
700+static struct clk_mux vdec_p0_mux = {
701+	.reg = (void *)HHI_VDEC_CLK_CNTL,
702+	.mask = 0x7,
703+	.shift = 9,
704+	.lock = &meson_clk_lock,
705+	.hw.init = &(struct clk_init_data){
706+		.name = "vdec_p0_mux",
707+		.ops = &clk_mux_ops,
708+		.parent_names = g12a_dec_parent_names,
709+		.num_parents = 8,
710+		.flags = CLK_GET_RATE_NOCACHE,
711+	},
712+};
713+
714+static struct clk_divider vdec_p0_div = {
715+	.reg = (void *)HHI_VDEC_CLK_CNTL,
716+	.shift = 0,
717+	.width = 7,
718+	.lock = &meson_clk_lock,
719+	.hw.init = &(struct clk_init_data){
720+		.name = "vdec_p0_div",
721+		.ops = &clk_divider_ops,
722+		.parent_names = (const char *[]){ "vdec_p0_mux" },
723+		.num_parents = 1,
724+		.flags = CLK_GET_RATE_NOCACHE,
725+	},
726+};
727+
728+static struct clk_gate vdec_p0_gate = {
729+	.reg = (void *)HHI_VDEC_CLK_CNTL,
730+	.bit_idx = 8,
731+	.lock = &meson_clk_lock,
732+	.hw.init = &(struct clk_init_data) {
733+		.name = "vdec_p0_gate",
734+		.ops = &clk_gate_ops,
735+		.parent_names = (const char *[]){ "vdec_p0_div" },
736+		.num_parents = 1,
737+		.flags = CLK_GET_RATE_NOCACHE,
738+	},
739+};
740+
741+static struct clk_mux vdec_p1_mux = {
742+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
743+	.mask = 0x7,
744+	.shift = 9,
745+	.lock = &meson_clk_lock,
746+	.hw.init = &(struct clk_init_data){
747+		.name = "vdec_p1_mux",
748+		.ops = &clk_mux_ops,
749+		.parent_names = g12a_dec_parent_names,
750+		.num_parents = 8,
751+		.flags = CLK_GET_RATE_NOCACHE,
752+	},
753+};
754+
755+static struct clk_divider vdec_p1_div = {
756+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
757+	.shift = 0,
758+	.width = 7,
759+	.lock = &meson_clk_lock,
760+	.hw.init = &(struct clk_init_data){
761+		.name = "vdec_p1_div",
762+		.ops = &clk_divider_ops,
763+		.parent_names = (const char *[]){ "vdec_p1_mux" },
764+		.num_parents = 1,
765+		.flags = CLK_GET_RATE_NOCACHE,
766+	},
767+};
768+
769+static struct clk_gate vdec_p1_gate = {
770+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
771+	.bit_idx = 8,
772+	.lock = &meson_clk_lock,
773+	.hw.init = &(struct clk_init_data) {
774+		.name = "vdec_p1_gate",
775+		.ops = &clk_gate_ops,
776+		.parent_names = (const char *[]){ "vdec_p1_div" },
777+		.num_parents = 1,
778+		.flags = CLK_GET_RATE_NOCACHE,
779+	},
780+};
781+
782+static struct clk_mux vdec_mux = {
783+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
784+	.mask = 0x1,
785+	.shift = 15,
786+	.lock = &meson_clk_lock,
787+	.flags = CLK_PARENT_ALTERNATE,
788+	.hw.init = &(struct clk_init_data){
789+		.name = "vdec_mux",
790+		.ops = &meson_clk_mux_ops,
791+		.parent_names = (const char *[]){ "vdec_p0_composite",
792+			"vdec_p1_composite"},
793+		.num_parents = 2,
794+		.flags = CLK_GET_RATE_NOCACHE,
795+	},
796+};
797+
798+/* cts_hcodec_clk */
799+static struct clk_mux hcodec_p0_mux = {
800+	.reg = (void *)HHI_VDEC_CLK_CNTL,
801+	.mask = 0x7,
802+	.shift = 25,
803+	.lock = &meson_clk_lock,
804+	.hw.init = &(struct clk_init_data){
805+		.name = "hcodec_p0_mux",
806+		.ops = &clk_mux_ops,
807+		.parent_names = g12a_dec_parent_names,
808+		.num_parents = 8,
809+		.flags = CLK_GET_RATE_NOCACHE,
810+	},
811+};
812+
813+static struct clk_divider hcodec_p0_div = {
814+	.reg = (void *)HHI_VDEC_CLK_CNTL,
815+	.shift = 16,
816+	.width = 7,
817+	.lock = &meson_clk_lock,
818+	.hw.init = &(struct clk_init_data){
819+		.name = "hcodec_p0_div",
820+		.ops = &clk_divider_ops,
821+		.parent_names = (const char *[]){ "hcodec_p0_mux" },
822+		.num_parents = 1,
823+		.flags = CLK_GET_RATE_NOCACHE,
824+	},
825+};
826+
827+static struct clk_gate hcodec_p0_gate = {
828+	.reg = (void *)HHI_VDEC_CLK_CNTL,
829+	.bit_idx = 24,
830+	.lock = &meson_clk_lock,
831+	.hw.init = &(struct clk_init_data) {
832+		.name = "hcodec_p0_gate",
833+		.ops = &clk_gate_ops,
834+		.parent_names = (const char *[]){ "hcodec_p0_div" },
835+		.num_parents = 1,
836+		.flags = CLK_GET_RATE_NOCACHE,
837+	},
838+};
839+
840+static struct clk_mux hcodec_p1_mux = {
841+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
842+	.mask = 0x7,
843+	.shift = 25,
844+	.lock = &meson_clk_lock,
845+	.hw.init = &(struct clk_init_data){
846+		.name = "hcodec_p1_mux",
847+		.ops = &clk_mux_ops,
848+		.parent_names = g12a_dec_parent_names,
849+		.num_parents = 8,
850+		.flags = CLK_GET_RATE_NOCACHE,
851+	},
852+};
853+
854+static struct clk_divider hcodec_p1_div = {
855+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
856+	.shift = 16,
857+	.width = 7,
858+	.lock = &meson_clk_lock,
859+	.hw.init = &(struct clk_init_data){
860+		.name = "hcodec_p1_div",
861+		.ops = &clk_divider_ops,
862+		.parent_names = (const char *[]){ "hcodec_p1_mux" },
863+		.num_parents = 1,
864+		.flags = CLK_GET_RATE_NOCACHE,
865+	},
866+};
867+
868+static struct clk_gate hcodec_p1_gate = {
869+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
870+	.bit_idx = 24,
871+	.lock = &meson_clk_lock,
872+	.hw.init = &(struct clk_init_data) {
873+		.name = "hcodec_p1_gate",
874+		.ops = &clk_gate_ops,
875+		.parent_names = (const char *[]){ "hcodec_p1_div" },
876+		.num_parents = 1,
877+		.flags = CLK_GET_RATE_NOCACHE,
878+	},
879+};
880+
881+static struct clk_mux hcodec_mux = {
882+	.reg = (void *)HHI_VDEC3_CLK_CNTL,
883+	.mask = 0x1,
884+	.shift = 31,
885+	.lock = &meson_clk_lock,
886+	.flags = CLK_PARENT_ALTERNATE,
887+	.hw.init = &(struct clk_init_data){
888+		.name = "hcodec_mux",
889+		.ops = &meson_clk_mux_ops,
890+		.parent_names = (const char *[]){ "hcodec_p0_composite",
891+			"hcodec_p1_composite"},
892+		.num_parents = 2,
893+		.flags = CLK_GET_RATE_NOCACHE,
894+	},
895+};
896+
897+/* cts_hevcb_clk */
898+static struct clk_mux hevc_p0_mux = {
899+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
900+	.mask = 0x7,
901+	.shift = 25,
902+	.lock = &meson_clk_lock,
903+	.hw.init = &(struct clk_init_data){
904+		.name = "hevc_p0_mux",
905+		.ops = &clk_mux_ops,
906+		.parent_names = g12a_dec_parent_names,
907+		.num_parents = 8,
908+		.flags = CLK_GET_RATE_NOCACHE,
909+	},
910+};
911+
912+static struct clk_divider hevc_p0_div = {
913+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
914+	.shift = 16,
915+	.width = 7,
916+	.lock = &meson_clk_lock,
917+	.hw.init = &(struct clk_init_data){
918+		.name = "hevc_p0_div",
919+		.ops = &clk_divider_ops,
920+		.parent_names = (const char *[]){ "hevc_p0_mux" },
921+		.num_parents = 1,
922+		.flags = CLK_GET_RATE_NOCACHE,
923+	},
924+};
925+
926+static struct clk_gate hevc_p0_gate = {
927+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
928+	.bit_idx = 24,
929+	.lock = &meson_clk_lock,
930+	.hw.init = &(struct clk_init_data) {
931+		.name = "hevc_p0_gate",
932+		.ops = &clk_gate_ops,
933+		.parent_names = (const char *[]){ "hevc_p0_div" },
934+		.num_parents = 1,
935+		.flags = CLK_GET_RATE_NOCACHE,
936+	},
937+};
938+
939+static struct clk_mux hevc_p1_mux = {
940+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
941+	.mask = 0x7,
942+	.shift = 25,
943+	.lock = &meson_clk_lock,
944+	.hw.init = &(struct clk_init_data){
945+		.name = "hevc_p1_mux",
946+		.ops = &clk_mux_ops,
947+		.parent_names = g12a_dec_parent_names,
948+		.num_parents = 8,
949+		.flags = CLK_GET_RATE_NOCACHE,
950+	},
951+};
952+
953+static struct clk_divider hevc_p1_div = {
954+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
955+	.shift = 16,
956+	.width = 7,
957+	.lock = &meson_clk_lock,
958+	.hw.init = &(struct clk_init_data){
959+		.name = "hevc_p1_div",
960+		.ops = &clk_divider_ops,
961+		.parent_names = (const char *[]){ "hevc_p1_mux" },
962+		.num_parents = 1,
963+		.flags = CLK_GET_RATE_NOCACHE,
964+	},
965+};
966+
967+static struct clk_gate hevc_p1_gate = {
968+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
969+	.bit_idx = 24,
970+	.lock = &meson_clk_lock,
971+	.hw.init = &(struct clk_init_data) {
972+		.name = "hevc_p1_gate",
973+		.ops = &clk_gate_ops,
974+		.parent_names = (const char *[]){ "hevc_p1_div" },
975+		.num_parents = 1,
976+		.flags = CLK_GET_RATE_NOCACHE,
977+	},
978+};
979+
980+static struct clk_mux hevc_mux = {
981+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
982+	.mask = 0x1,
983+	.shift = 31,
984+	.lock = &meson_clk_lock,
985+	.flags = CLK_PARENT_ALTERNATE,
986+	.hw.init = &(struct clk_init_data){
987+		.name = "hevc_mux",
988+		.ops = &meson_clk_mux_ops,
989+		.parent_names = (const char *[]){ "hevc_p0_composite",
990+			"hevc_p1_composite"},
991+		.num_parents = 2,
992+		.flags = CLK_GET_RATE_NOCACHE,
993+	},
994+};
995+
996+/* cts_hevcf_clk */
997+static struct clk_mux hevcf_p0_mux = {
998+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
999+	.mask = 0x7,
1000+	.shift = 9,
1001+	.lock = &meson_clk_lock,
1002+	.hw.init = &(struct clk_init_data){
1003+		.name = "hevcf_p0_mux",
1004+		.ops = &clk_mux_ops,
1005+		.parent_names = g12a_dec_parent_names,
1006+		.num_parents = 8,
1007+		.flags = CLK_GET_RATE_NOCACHE,
1008+	},
1009+};
1010+
1011+static struct clk_divider hevcf_p0_div = {
1012+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
1013+	.shift = 0,
1014+	.width = 7,
1015+	.lock = &meson_clk_lock,
1016+	.hw.init = &(struct clk_init_data){
1017+		.name = "hevcf_p0_div",
1018+		.ops = &clk_divider_ops,
1019+		.parent_names = (const char *[]){ "hevcf_p0_mux" },
1020+		.num_parents = 1,
1021+		.flags = CLK_GET_RATE_NOCACHE,
1022+	},
1023+};
1024+
1025+static struct clk_gate hevcf_p0_gate = {
1026+	.reg = (void *)HHI_VDEC2_CLK_CNTL,
1027+	.bit_idx = 8,
1028+	.lock = &meson_clk_lock,
1029+	.hw.init = &(struct clk_init_data) {
1030+		.name = "hevcf_p0_gate",
1031+		.ops = &clk_gate_ops,
1032+		.parent_names = (const char *[]){ "hevcf_p0_div" },
1033+		.num_parents = 1,
1034+		.flags = CLK_GET_RATE_NOCACHE,
1035+	},
1036+};
1037+
1038+static struct clk_mux hevcf_p1_mux = {
1039+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
1040+	.mask = 0x7,
1041+	.shift = 9,
1042+	.lock = &meson_clk_lock,
1043+	.hw.init = &(struct clk_init_data){
1044+		.name = "hevcf_p1_mux",
1045+		.ops = &clk_mux_ops,
1046+		.parent_names = g12a_dec_parent_names,
1047+		.num_parents = 8,
1048+		.flags = CLK_GET_RATE_NOCACHE,
1049+	},
1050+};
1051+
1052+static struct clk_divider hevcf_p1_div = {
1053+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
1054+	.shift = 0,
1055+	.width = 7,
1056+	.lock = &meson_clk_lock,
1057+	.hw.init = &(struct clk_init_data){
1058+		.name = "hevcf_p1_div",
1059+		.ops = &clk_divider_ops,
1060+		.parent_names = (const char *[]){ "hevcf_p1_mux" },
1061+		.num_parents = 1,
1062+		.flags = CLK_GET_RATE_NOCACHE,
1063+	},
1064+};
1065+
1066+static struct clk_gate hevcf_p1_gate = {
1067+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
1068+	.bit_idx = 8,
1069+	.lock = &meson_clk_lock,
1070+	.hw.init = &(struct clk_init_data) {
1071+		.name = "hevcf_p1_gate",
1072+		.ops = &clk_gate_ops,
1073+		.parent_names = (const char *[]){ "hevcf_p1_div" },
1074+		.num_parents = 1,
1075+		.flags = CLK_GET_RATE_NOCACHE,
1076+	},
1077+};
1078+
1079+static struct clk_mux hevcf_mux = {
1080+	.reg = (void *)HHI_VDEC4_CLK_CNTL,
1081+	.mask = 0x1,
1082+	.shift = 15,
1083+	.lock = &meson_clk_lock,
1084+	.flags = CLK_PARENT_ALTERNATE,
1085+	.hw.init = &(struct clk_init_data){
1086+		.name = "hevcf_mux",
1087+		.ops = &meson_clk_mux_ops,
1088+		.parent_names = (const char *[]){ "hevcf_p0_composite",
1089+			"hevcf_p1_composite"},
1090+		.num_parents = 2,
1091+		.flags = CLK_GET_RATE_NOCACHE,
1092+	},
1093+};
1094+
1095+static const char * const vpu_clkb_tmp_parent_names[] = { "vpu",
1096+	"fclk_div4", "fclk_div5", "fclk_div7"};
1097+
1098+static struct clk_mux vpu_clkb_tmp_mux = {
1099+	.reg = (void *)HHI_VPU_CLKB_CNTL,
1100+	.mask = 0x3,
1101+	.shift = 20,
1102+	.lock = &meson_clk_lock,
1103+	.hw.init = &(struct clk_init_data){
1104+		.name = "vpu_clkb_tmp_mux",
1105+		.ops = &clk_mux_ops,
1106+		.parent_names = vpu_clkb_tmp_parent_names,
1107+		.num_parents = ARRAY_SIZE(vpu_clkb_tmp_parent_names),
1108+		.flags = CLK_GET_RATE_NOCACHE,
1109+	},
1110+};
1111+
1112+static struct clk_divider vpu_clkb_tmp_div = {
1113+	.reg = (void *)HHI_VPU_CLKB_CNTL,
1114+	.shift = 16,
1115+	.width = 4,
1116+	.lock = &meson_clk_lock,
1117+	.hw.init = &(struct clk_init_data){
1118+		.name = "vpu_clkb_tmp_div",
1119+		.ops = &clk_divider_ops,
1120+		.parent_names = (const char *[]){ "vpu_clkb_tmp_mux" },
1121+		.num_parents = 1,
1122+		.flags = CLK_GET_RATE_NOCACHE,
1123+	},
1124+};
1125+
1126+static struct clk_gate vpu_clkb_tmp_gate = {
1127+	.reg = (void *)HHI_VPU_CLKB_CNTL,
1128+	.bit_idx = 24,
1129+	.lock = &meson_clk_lock,
1130+	.hw.init = &(struct clk_init_data) {
1131+		.name = "vpu_clkb_tmp_gate",
1132+		.ops = &clk_gate_ops,
1133+		.parent_names = (const char *[]){ "vpu_clkb_tmp_div" },
1134+		.num_parents = 1,
1135+		.flags = CLK_GET_RATE_NOCACHE,
1136+	},
1137+};
1138+
1139+static const char * const vpu_clkb_parent_names[]
1140+						= { "vpu_clkb_tmp_composite" };
1141+
1142+static struct clk_divider vpu_clkb_div = {
1143+	.reg = (void *)HHI_VPU_CLKB_CNTL,
1144+	.shift = 0,
1145+	.width = 8,
1146+	.lock = &meson_clk_lock,
1147+	.hw.init = &(struct clk_init_data){
1148+		.name = "vpu_clkb_div",
1149+		.ops = &clk_divider_ops,
1150+		.parent_names = (const char *[]){ "vpu_clkb_tmp_composite" },
1151+		.num_parents = 1,
1152+		.flags = CLK_GET_RATE_NOCACHE,
1153+	},
1154+};
1155+
1156+static struct clk_gate vpu_clkb_gate = {
1157+	.reg = (void *)HHI_VPU_CLKB_CNTL,
1158+	.bit_idx = 8,
1159+	.lock = &meson_clk_lock,
1160+	.hw.init = &(struct clk_init_data) {
1161+		.name = "vpu_clkb_gate",
1162+		.ops = &clk_gate_ops,
1163+		.parent_names = (const char *[]){ "vpu_clkb_div" },
1164+		.num_parents = 1,
1165+		.flags = CLK_GET_RATE_NOCACHE,
1166+	},
1167+};
1168+
1169+void meson_g12a_vcodec_init(struct clk_hw **clk_hws, void __iomem *clk_base)
1170+{
1171+	/* cts_vdec_clk */
1172+	vdec_p0_mux.reg = clk_base + (unsigned long)(vdec_p0_mux.reg);
1173+	vdec_p0_div.reg = clk_base + (unsigned long)(vdec_p0_div.reg);
1174+	vdec_p0_gate.reg = clk_base + (unsigned long)(vdec_p0_gate.reg);
1175+	vdec_p1_mux.reg = clk_base + (unsigned long)(vdec_p1_mux.reg);
1176+	vdec_p1_div.reg = clk_base + (unsigned long)(vdec_p1_div.reg);
1177+	vdec_p1_gate.reg = clk_base + (unsigned long)(vdec_p1_gate.reg);
1178+	vdec_mux.reg = clk_base + (unsigned long)(vdec_mux.reg);
1179+
1180+	/* cts_hcodec_clk */
1181+	hcodec_p0_mux.reg = clk_base + (unsigned long)(hcodec_p0_mux.reg);
1182+	hcodec_p0_div.reg = clk_base + (unsigned long)(hcodec_p0_div.reg);
1183+	hcodec_p0_gate.reg = clk_base + (unsigned long)(hcodec_p0_gate.reg);
1184+	hcodec_p1_mux.reg = clk_base + (unsigned long)(hcodec_p1_mux.reg);
1185+	hcodec_p1_div.reg = clk_base + (unsigned long)(hcodec_p1_div.reg);
1186+	hcodec_p1_gate.reg = clk_base + (unsigned long)(hcodec_p1_gate.reg);
1187+	hcodec_mux.reg = clk_base + (unsigned long)(hcodec_mux.reg);
1188+
1189+	/* cts_hevc_clk */
1190+	hevc_p0_mux.reg = clk_base + (unsigned long)(hevc_p0_mux.reg);
1191+	hevc_p0_div.reg = clk_base + (unsigned long)(hevc_p0_div.reg);
1192+	hevc_p0_gate.reg = clk_base + (unsigned long)(hevc_p0_gate.reg);
1193+	hevc_p1_mux.reg = clk_base + (unsigned long)(hevc_p1_mux.reg);
1194+	hevc_p1_div.reg = clk_base + (unsigned long)(hevc_p1_div.reg);
1195+	hevc_p1_gate.reg = clk_base + (unsigned long)(hevc_p1_gate.reg);
1196+	hevc_mux.reg = clk_base + (unsigned long)(hevc_mux.reg);
1197+
1198+	/* cts_hevcf_clk */
1199+	hevcf_p0_mux.reg = clk_base + (unsigned long)(hevcf_p0_mux.reg);
1200+	hevcf_p0_div.reg = clk_base + (unsigned long)(hevcf_p0_div.reg);
1201+	hevcf_p0_gate.reg = clk_base + (unsigned long)(hevcf_p0_gate.reg);
1202+	hevcf_p1_mux.reg = clk_base + (unsigned long)(hevcf_p1_mux.reg);
1203+	hevcf_p1_div.reg = clk_base + (unsigned long)(hevcf_p1_div.reg);
1204+	hevcf_p1_gate.reg = clk_base + (unsigned long)(hevcf_p1_gate.reg);
1205+	hevcf_mux.reg = clk_base + (unsigned long)(hevcf_mux.reg);
1206+
1207+	vpu_clkb_tmp_mux.reg = clk_base + (unsigned long)(vpu_clkb_tmp_mux.reg);
1208+	vpu_clkb_tmp_div.reg = clk_base + (unsigned long)(vpu_clkb_tmp_div.reg);
1209+	vpu_clkb_tmp_gate.reg = clk_base + (unsigned long)(vpu_clkb_tmp_gate.reg);
1210+
1211+	vpu_clkb_div.reg = clk_base + (unsigned long)(vpu_clkb_div.reg);
1212+	vpu_clkb_gate.reg = clk_base + (unsigned long)(vpu_clkb_gate.reg);
1213+
1214+	/* cts_vdec_clk */
1215+	clk_hws[CLKID_VDEC_P0_COMP] = clk_hw_register_composite(NULL,
1216+		"vdec_p0_composite",
1217+		g12a_dec_parent_names, 8,
1218+		&vdec_p0_mux.hw,
1219+		&clk_mux_ops,
1220+		&vdec_p0_div.hw,
1221+		&clk_divider_ops,
1222+		&vdec_p0_gate.hw,
1223+		&clk_gate_ops, 0);
1224+	if (IS_ERR(clk_hws[CLKID_VDEC_P0_COMP]))
1225+		panic("%s: %d clk_hw_register_composite vdec_p0_composite error\n",
1226+			__func__, __LINE__);
1227+
1228+	clk_hws[CLKID_VDEC_P1_COMP] = clk_hw_register_composite(NULL,
1229+		"vdec_p1_composite",
1230+		g12a_dec_parent_names, 8,
1231+		&vdec_p1_mux.hw,
1232+		&clk_mux_ops,
1233+		&vdec_p1_div.hw,
1234+		&clk_divider_ops,
1235+		&vdec_p1_gate.hw,
1236+		&clk_gate_ops, 0);
1237+	if (IS_ERR(clk_hws[CLKID_VDEC_P1_COMP]))
1238+		panic("%s: %d clk_hw_register_composite vdec_p1_composite error\n",
1239+			__func__, __LINE__);
1240+
1241+	if (clk_hw_register(NULL, &vdec_mux.hw)) {
1242+		panic("%s: %d clk_hw_register vdec_mux error\n",
1243+			__func__, __LINE__);
1244+	} else {
1245+		clk_hws[CLKID_VDEC_MUX] = &vdec_mux.hw;
1246+	}
1247+
1248+	if (IS_ERR(clk_hws[CLKID_VDEC_MUX]))
1249+		panic("%s: %d clk_hw_register vdec_mux error\n",
1250+			__func__, __LINE__);
1251+
1252+	/* cts_hcodec_clk */
1253+	clk_hws[CLKID_HCODEC_P0_COMP] = clk_hw_register_composite(NULL,
1254+		"hcodec_p0_composite",
1255+		g12a_dec_parent_names, 8,
1256+		&hcodec_p0_mux.hw,
1257+		&clk_mux_ops,
1258+		&hcodec_p0_div.hw,
1259+		&clk_divider_ops,
1260+		&hcodec_p0_gate.hw,
1261+		&clk_gate_ops, 0);
1262+	if (IS_ERR(clk_hws[CLKID_HCODEC_P0_COMP]))
1263+		panic("%s: %d clk_hw_register_composite hcodec_p0_composite error\n",
1264+			__func__, __LINE__);
1265+
1266+	clk_hws[CLKID_HCODEC_P1_COMP] = clk_hw_register_composite(NULL,
1267+		"hcodec_p1_composite",
1268+		g12a_dec_parent_names, 8,
1269+		&hcodec_p1_mux.hw,
1270+		&clk_mux_ops,
1271+		&hcodec_p1_div.hw,
1272+		&clk_divider_ops,
1273+		&hcodec_p1_gate.hw,
1274+		&clk_gate_ops, 0);
1275+	if (IS_ERR(clk_hws[CLKID_HCODEC_P1_COMP]))
1276+		panic("%s: %d clk_hw_register_composite hcodec_p1_composite error\n",
1277+			__func__, __LINE__);
1278+
1279+	if (clk_hw_register(NULL, &hcodec_mux.hw)) {
1280+		panic("%s: %d clk_hw_register hcodec_mux error\n",
1281+			__func__, __LINE__);
1282+	} else {
1283+		clk_hws[CLKID_HCODEC_MUX] = &hcodec_mux.hw;
1284+	}
1285+
1286+	if (IS_ERR(clk_hws[CLKID_HCODEC_MUX]))
1287+		panic("%s: %d clk_hw_register hcodec_mux error\n",
1288+			__func__, __LINE__);
1289+
1290+	/* cts_hevc_clk */
1291+	clk_hws[CLKID_HEVC_P0_COMP] = clk_hw_register_composite(NULL,
1292+		"hevc_p0_composite",
1293+		g12a_dec_parent_names, 8,
1294+		&hevc_p0_mux.hw,
1295+		&clk_mux_ops,
1296+		&hevc_p0_div.hw,
1297+		&clk_divider_ops,
1298+		&hevc_p0_gate.hw,
1299+		&clk_gate_ops, 0);
1300+	if (IS_ERR(clk_hws[CLKID_HEVC_P0_COMP]))
1301+		panic("%s: %d clk_hw_register_composite hevc_p0_composite error\n",
1302+			__func__, __LINE__);
1303+
1304+	clk_hws[CLKID_HEVC_P1_COMP] = clk_hw_register_composite(NULL,
1305+		"hevc_p1_composite",
1306+		g12a_dec_parent_names, 8,
1307+		&hevc_p1_mux.hw,
1308+		&clk_mux_ops,
1309+		&hevc_p1_div.hw,
1310+		&clk_divider_ops,
1311+		&hevc_p1_gate.hw,
1312+		&clk_gate_ops, 0);
1313+	if (IS_ERR(clk_hws[CLKID_HEVC_P1_COMP]))
1314+		panic("%s: %d clk_hw_register_composite hevc_p1_composite error\n",
1315+			__func__, __LINE__);
1316+
1317+	if (clk_hw_register(NULL, &hevc_mux.hw)) {
1318+		panic("%s: %d clk_hw_register hevc_mux error\n",
1319+			__func__, __LINE__);
1320+	} else {
1321+		clk_hws[CLKID_HEVC_MUX] = &hevc_mux.hw;
1322+	}
1323+
1324+	if (IS_ERR(clk_hws[CLKID_HEVC_MUX]))
1325+		panic("%s: %d clk_hw_register hevc_mux error\n",
1326+			__func__, __LINE__);
1327+
1328+	/* cts_hevcf_clk */
1329+	clk_hws[CLKID_HEVCF_P0_COMP] = clk_hw_register_composite(NULL,
1330+		"hevcf_p0_composite",
1331+		g12a_dec_parent_names, 8,
1332+		&hevcf_p0_mux.hw,
1333+		&clk_mux_ops,
1334+		&hevcf_p0_div.hw,
1335+		&clk_divider_ops,
1336+		&hevcf_p0_gate.hw,
1337+		&clk_gate_ops, 0);
1338+	if (IS_ERR(clk_hws[CLKID_HEVCF_P0_COMP]))
1339+		panic("%s: %d clk_hw_register_composite hevcf_p0_composite error\n",
1340+			__func__, __LINE__);
1341+
1342+	clk_hws[CLKID_HEVCF_P1_COMP] = clk_hw_register_composite(NULL,
1343+		"hevcf_p1_composite",
1344+		g12a_dec_parent_names, 8,
1345+		&hevcf_p1_mux.hw,
1346+		&clk_mux_ops,
1347+		&hevcf_p1_div.hw,
1348+		&clk_divider_ops,
1349+		&hevcf_p1_gate.hw,
1350+		&clk_gate_ops, 0);
1351+	if (IS_ERR(clk_hws[CLKID_HEVCF_P1_COMP]))
1352+		panic("%s: %d clk_hw_register_composite hevcf_p1_composite error\n",
1353+			__func__, __LINE__);
1354+
1355+	if (clk_hw_register(NULL, &hevcf_mux.hw)) {
1356+		panic("%s: %d clk_hw_register hevcf_mux error\n",
1357+			__func__, __LINE__);
1358+	} else {
1359+		clk_hws[CLKID_HEVCF_MUX] = &hevcf_mux.hw;
1360+	}
1361+
1362+	if (IS_ERR(clk_hws[CLKID_HEVCF_MUX]))
1363+		panic("%s: %d clk_hw_register hevcf_mux error\n",
1364+			__func__, __LINE__);
1365+
1366+	clk_hws[CLKID_VPU_CLKB_TMP_COMP] = clk_hw_register_composite(NULL,
1367+			"vpu_clkb_tmp_composite",
1368+			vpu_clkb_tmp_parent_names, 4,
1369+			&vpu_clkb_tmp_mux.hw,
1370+			&clk_mux_ops,
1371+			&vpu_clkb_tmp_div.hw,
1372+			&clk_divider_ops,
1373+			&vpu_clkb_tmp_gate.hw,
1374+			&clk_gate_ops, 0);
1375+	if (IS_ERR(clk_hws[CLKID_VPU_CLKB_TMP_COMP]))
1376+		panic("%s: %d clk_register_composite vpu_clkb_tmp_composite error\n",
1377+			__func__, __LINE__);
1378+
1379+	clk_hws[CLKID_VPU_CLKB_COMP] = clk_hw_register_composite(NULL,
1380+			"vpu_clkb_composite",
1381+			vpu_clkb_parent_names, 1,
1382+			NULL,
1383+			NULL,
1384+			&vpu_clkb_div.hw,
1385+			&clk_divider_ops,
1386+			&vpu_clkb_gate.hw,
1387+			&clk_gate_ops, 0);
1388+	if (IS_ERR(clk_hws[CLKID_VPU_CLKB_COMP]))
1389+		panic("%s: %d clk_register_composite vpu_clkb_composite error\n",
1390+			__func__, __LINE__);
1391+
1392+	pr_info("%s: register meson media clk\n", __func__);
1393+}
1394+
1395diff -Naur a/drivers/clk/meson/g12a-vcodec-clk.h b/drivers/clk/meson/g12a-vcodec-clk.h
1396--- a/drivers/clk/meson/g12a-vcodec-clk.h	1970-01-01 08:00:00.000000000 +0800
1397+++ b/drivers/clk/meson/g12a-vcodec-clk.h	2023-02-23 17:02:04.951750958 +0800
1398@@ -0,0 +1,12 @@
1399+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
1400+/*
1401+ * Copyright (c) 2019 BayLibre, SAS
1402+ * Author: Neil Armstrong <narmstrong@baylibre.com>
1403+ */
1404+
1405+#ifndef __G12A_VCODEC_CLK_H
1406+#define __G12A_VCODEC_CLK_H
1407+
1408+extern void meson_g12a_vcodec_init(struct clk_hw **clks, void __iomem *clk_base);
1409+
1410+#endif /* __G12A_VCODEC_CLK_H */
1411diff -Naur a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
1412--- a/drivers/clk/meson/Makefile	2022-12-19 17:13:11.809508587 +0800
1413+++ b/drivers/clk/meson/Makefile	2023-02-23 17:02:04.951750958 +0800
1414@@ -7,7 +7,7 @@
1415 obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
1416 obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
1417 obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
1418-obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
1419+obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o clk-mux.o
1420 obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o
1421 obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o
1422 obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o
1423@@ -17,5 +17,5 @@
1424 obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o
1425 obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o
1426 obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o
1427-obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o
1428+obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o g12a-vcodec-clk.o
1429 obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o meson8-ddr.o
1430diff -Naur a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
1431--- a/drivers/clk/meson/vid-pll-div.c	2022-12-19 17:13:11.813508635 +0800
1432+++ b/drivers/clk/meson/vid-pll-div.c	2023-02-23 17:02:04.951750958 +0800
1433@@ -39,12 +39,14 @@
1434 	}
1435
1436 static const struct vid_pll_div vid_pll_div_table[] = {
1437+	VID_PLL_DIV(0xffff, 0, 1, 1),	/* 1/1  => /1 */
1438 	VID_PLL_DIV(0x0aaa, 0, 2, 1),	/* 2/1  => /2 */
1439 	VID_PLL_DIV(0x5294, 2, 5, 2),	/* 5/2  => /2.5 */
1440 	VID_PLL_DIV(0x0db6, 0, 3, 1),	/* 3/1  => /3 */
1441 	VID_PLL_DIV(0x36cc, 1, 7, 2),	/* 7/2  => /3.5 */
1442 	VID_PLL_DIV(0x6666, 2, 15, 4),	/* 15/4 => /3.75 */
1443 	VID_PLL_DIV(0x0ccc, 0, 4, 1),	/* 4/1  => /4 */
1444+	VID_PLL_DIV(0x0ccc, 1, 467, 100), /* 467/100  => /4.67 */
1445 	VID_PLL_DIV(0x739c, 2, 5, 1),	/* 5/1  => /5 */
1446 	VID_PLL_DIV(0x0e38, 0, 6, 1),	/* 6/1  => /6 */
1447 	VID_PLL_DIV(0x0000, 3, 25, 4),	/* 25/4 => /6.25 */
1448diff -Naur a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
1449--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c	2022-12-19 17:13:12.585517887 +0800
1450+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c	2023-02-23 17:02:04.951750958 +0800
1451@@ -143,6 +143,7 @@
1452 struct dw_hdmi {
1453 	struct drm_connector connector;
1454 	struct drm_bridge bridge;
1455+	struct drm_bridge *next_bridge;
1456
1457 	unsigned int version;
1458
1459@@ -754,6 +755,11 @@
1460 	else
1461 		hdmi->mc_clkdis |= HDMI_MC_CLKDIS_AUDCLK_DISABLE;
1462 	hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
1463+
1464+	if (enable) {
1465+		hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
1466+		hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
1467+	}
1468 }
1469
1470 static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi)
1471@@ -2403,21 +2409,6 @@
1472 	return ret;
1473 }
1474
1475-static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
1476-			       const struct drm_connector_state *new_state)
1477-{
1478-	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
1479-	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
1480-
1481-	if (!old_blob || !new_blob)
1482-		return old_blob == new_blob;
1483-
1484-	if (old_blob->length != new_blob->length)
1485-		return false;
1486-
1487-	return !memcmp(old_blob->data, new_blob->data, old_blob->length);
1488-}
1489-
1490 static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
1491 					  struct drm_atomic_state *state)
1492 {
1493@@ -2431,7 +2422,7 @@
1494 	if (!crtc)
1495 		return 0;
1496
1497-	if (!hdr_metadata_equal(old_state, new_state)) {
1498+	if (!drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
1499 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
1500 		if (IS_ERR(crtc_state))
1501 			return PTR_ERR(crtc_state);
1502@@ -2500,8 +2491,7 @@
1503 	drm_connector_attach_max_bpc_property(connector, 8, 16);
1504
1505 	if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
1506-		drm_object_attach_property(&connector->base,
1507-			connector->dev->mode_config.hdr_output_metadata_property, 0);
1508+		drm_connector_attach_hdr_output_metadata_property(connector);
1509
1510 	drm_connector_attach_encoder(connector, hdmi->bridge.encoder);
1511
1512@@ -2800,7 +2790,8 @@
1513 	struct dw_hdmi *hdmi = bridge->driver_private;
1514
1515 	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
1516-		return 0;
1517+		return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
1518+					 bridge, flags);
1519
1520 	return dw_hdmi_connector_create(hdmi);
1521 }
1522@@ -3185,6 +3176,52 @@
1523 /* -----------------------------------------------------------------------------
1524  * Probe/remove API, used from platforms based on the DRM bridge API.
1525  */
1526+
1527+static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
1528+{
1529+	struct device_node *endpoint;
1530+	struct device_node *remote;
1531+
1532+	if (!hdmi->plat_data->output_port)
1533+		return 0;
1534+
1535+	endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node,
1536+						 hdmi->plat_data->output_port,
1537+						 -1);
1538+	if (!endpoint) {
1539+		/*
1540+		 * On platforms whose bindings don't make the output port
1541+		 * mandatory (such as Rockchip) the plat_data->output_port
1542+		 * field isn't set, so it's safe to make this a fatal error.
1543+		 */
1544+		dev_err(hdmi->dev, "Missing endpoint in port@%u\n",
1545+			hdmi->plat_data->output_port);
1546+		return -ENODEV;
1547+	}
1548+
1549+	remote = of_graph_get_remote_port_parent(endpoint);
1550+	of_node_put(endpoint);
1551+	if (!remote) {
1552+		dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n",
1553+			hdmi->plat_data->output_port);
1554+		return -ENODEV;
1555+	}
1556+
1557+	if (!of_device_is_available(remote)) {
1558+		dev_err(hdmi->dev, "port@%u remote device is disabled\n",
1559+			hdmi->plat_data->output_port);
1560+		of_node_put(remote);
1561+		return -ENODEV;
1562+	}
1563+
1564+	hdmi->next_bridge = of_drm_find_bridge(remote);
1565+	of_node_put(remote);
1566+	if (!hdmi->next_bridge)
1567+		return -EPROBE_DEFER;
1568+
1569+	return 0;
1570+}
1571+
1572 struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
1573 			      const struct dw_hdmi_plat_data *plat_data)
1574 {
1575@@ -3221,6 +3258,10 @@
1576 	mutex_init(&hdmi->cec_notifier_mutex);
1577 	spin_lock_init(&hdmi->audio_lock);
1578
1579+	ret = dw_hdmi_parse_dt(hdmi);
1580+	if (ret < 0)
1581+		return ERR_PTR(ret);
1582+
1583 	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
1584 	if (ddc_node) {
1585 		hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node);
1586@@ -3386,6 +3427,7 @@
1587 	hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
1588 	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
1589 			 | DRM_BRIDGE_OP_HPD;
1590+	hdmi->bridge.interlace_allowed = true;
1591 #ifdef CONFIG_OF
1592 	hdmi->bridge.of_node = pdev->dev.of_node;
1593 #endif
1594@@ -3430,7 +3472,7 @@
1595 		hdmi->audio = platform_device_register_full(&pdevinfo);
1596 	}
1597
1598-	if (config0 & HDMI_CONFIG0_CEC) {
1599+	if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) {
1600 		cec.hdmi = hdmi;
1601 		cec.ops = &dw_hdmi_cec_ops;
1602 		cec.irq = irq;
1603@@ -3449,8 +3491,7 @@
1604
1605 err_iahb:
1606 	clk_disable_unprepare(hdmi->iahb_clk);
1607-	if (hdmi->cec_clk)
1608-		clk_disable_unprepare(hdmi->cec_clk);
1609+	clk_disable_unprepare(hdmi->cec_clk);
1610 err_isfr:
1611 	clk_disable_unprepare(hdmi->isfr_clk);
1612 err_res:
1613@@ -3474,8 +3515,7 @@
1614
1615 	clk_disable_unprepare(hdmi->iahb_clk);
1616 	clk_disable_unprepare(hdmi->isfr_clk);
1617-	if (hdmi->cec_clk)
1618-		clk_disable_unprepare(hdmi->cec_clk);
1619+	clk_disable_unprepare(hdmi->cec_clk);
1620
1621 	if (hdmi->i2c)
1622 		i2c_del_adapter(&hdmi->i2c->adap);
1623diff -Naur a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
1624--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c	2022-12-19 17:13:12.585517887 +0800
1625+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c	2023-02-23 17:02:04.951750958 +0800
1626@@ -265,11 +265,9 @@
1627 	/* override the module pointer */
1628 	cec->adap->owner = THIS_MODULE;
1629
1630-	ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
1631-	if (ret) {
1632-		cec_delete_adapter(cec->adap);
1633+	ret = devm_add_action_or_reset(&pdev->dev, dw_hdmi_cec_del, cec);
1634+	if (ret)
1635 		return ret;
1636-	}
1637
1638 	ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
1639 					dw_hdmi_cec_hardirq,
1640diff -Naur a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
1641--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c	2022-12-19 17:13:12.589517935 +0800
1642+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c	2023-02-23 17:02:04.951750958 +0800
1643@@ -246,6 +246,7 @@
1644
1645 	struct clk *pclk;
1646
1647+	bool device_found;
1648 	unsigned int lane_mbps; /* per lane */
1649 	u32 channel;
1650 	u32 lanes;
1651@@ -309,13 +310,37 @@
1652 	return readl(dsi->base + reg);
1653 }
1654
1655+static int dw_mipi_dsi_panel_or_bridge(struct dw_mipi_dsi *dsi,
1656+				       struct device_node *node)
1657+{
1658+	struct drm_bridge *bridge;
1659+	struct drm_panel *panel;
1660+	int ret;
1661+
1662+	ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
1663+	if (ret)
1664+		return ret;
1665+
1666+	if (panel) {
1667+		bridge = drm_panel_bridge_add_typed(panel,
1668+						    DRM_MODE_CONNECTOR_DSI);
1669+		if (IS_ERR(bridge))
1670+			return PTR_ERR(bridge);
1671+	}
1672+
1673+	dsi->panel_bridge = bridge;
1674+
1675+	if (!dsi->panel_bridge)
1676+		return -EPROBE_DEFER;
1677+
1678+	return 0;
1679+}
1680+
1681 static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
1682 				   struct mipi_dsi_device *device)
1683 {
1684 	struct dw_mipi_dsi *dsi = host_to_dsi(host);
1685 	const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
1686-	struct drm_bridge *bridge;
1687-	struct drm_panel *panel;
1688 	int ret;
1689
1690 	if (device->lanes > dsi->plat_data->max_data_lanes) {
1691@@ -329,22 +354,14 @@
1692 	dsi->format = device->format;
1693 	dsi->mode_flags = device->mode_flags;
1694
1695-	ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0,
1696-					  &panel, &bridge);
1697-	if (ret)
1698-		return ret;
1699+	if (!dsi->device_found) {
1700+		ret = dw_mipi_dsi_panel_or_bridge(dsi, host->dev->of_node);
1701+		if (ret)
1702+			return ret;
1703
1704-	if (panel) {
1705-		bridge = drm_panel_bridge_add_typed(panel,
1706-						    DRM_MODE_CONNECTOR_DSI);
1707-		if (IS_ERR(bridge))
1708-			return PTR_ERR(bridge);
1709+		dsi->device_found = true;
1710 	}
1711
1712-	dsi->panel_bridge = bridge;
1713-
1714-	drm_bridge_add(&dsi->bridge);
1715-
1716 	if (pdata->host_ops && pdata->host_ops->attach) {
1717 		ret = pdata->host_ops->attach(pdata->priv_data, device);
1718 		if (ret < 0)
1719@@ -854,7 +871,8 @@
1720 	dsi_write(dsi, DSI_INT_MSK1, 0);
1721 }
1722
1723-static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
1724+static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
1725+						   struct drm_bridge_state *old_bridge_state)
1726 {
1727 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
1728 	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
1729@@ -961,7 +979,8 @@
1730 		dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
1731 }
1732
1733-static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
1734+static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
1735+					     struct drm_bridge_state *old_bridge_state)
1736 {
1737 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
1738
1739@@ -981,7 +1000,10 @@
1740 	enum drm_mode_status mode_status = MODE_OK;
1741
1742 	if (pdata->mode_valid)
1743-		mode_status = pdata->mode_valid(pdata->priv_data, mode);
1744+		mode_status = pdata->mode_valid(pdata->priv_data, mode,
1745+						dsi->mode_flags,
1746+						dw_mipi_dsi_get_lanes(dsi),
1747+						dsi->format);
1748
1749 	return mode_status;
1750 }
1751@@ -999,17 +1021,30 @@
1752 	/* Set the encoder type as caller does not know it */
1753 	bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
1754
1755+	if (!dsi->device_found) {
1756+		int ret;
1757+
1758+		ret = dw_mipi_dsi_panel_or_bridge(dsi, dsi->dev->of_node);
1759+		if (ret)
1760+			return ret;
1761+
1762+		dsi->device_found = true;
1763+	}
1764+
1765 	/* Attach the panel-bridge to the dsi bridge */
1766 	return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
1767 				 flags);
1768 }
1769
1770 static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
1771-	.mode_set     = dw_mipi_dsi_bridge_mode_set,
1772-	.enable	      = dw_mipi_dsi_bridge_enable,
1773-	.post_disable = dw_mipi_dsi_bridge_post_disable,
1774-	.mode_valid   = dw_mipi_dsi_bridge_mode_valid,
1775-	.attach	      = dw_mipi_dsi_bridge_attach,
1776+	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
1777+	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
1778+	.atomic_reset		= drm_atomic_helper_bridge_reset,
1779+	.atomic_enable		= dw_mipi_dsi_bridge_atomic_enable,
1780+	.atomic_post_disable	= dw_mipi_dsi_bridge_post_atomic_disable,
1781+	.mode_set		= dw_mipi_dsi_bridge_mode_set,
1782+	.mode_valid		= dw_mipi_dsi_bridge_mode_valid,
1783+	.attach			= dw_mipi_dsi_bridge_attach,
1784 };
1785
1786 #ifdef CONFIG_DEBUG_FS
1787@@ -1182,6 +1217,7 @@
1788 #ifdef CONFIG_OF
1789 	dsi->bridge.of_node = pdev->dev.of_node;
1790 #endif
1791+	drm_bridge_add(&dsi->bridge);
1792
1793 	return dsi;
1794 }
1795diff -Naur a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
1796--- a/drivers/gpu/drm/drm_aperture.c	1970-01-01 08:00:00.000000000 +0800
1797+++ b/drivers/gpu/drm/drm_aperture.c	2023-02-23 17:02:04.951750958 +0800
1798@@ -0,0 +1,353 @@
1799+// SPDX-License-Identifier: MIT
1800+
1801+#include <linux/device.h>
1802+#include <linux/fb.h>
1803+#include <linux/list.h>
1804+#include <linux/mutex.h>
1805+#include <linux/pci.h>
1806+#include <linux/platform_device.h> /* for firmware helpers */
1807+#include <linux/slab.h>
1808+#include <linux/types.h>
1809+#include <linux/vgaarb.h>
1810+
1811+#include <drm/drm_aperture.h>
1812+#include <drm/drm_drv.h>
1813+#include <drm/drm_print.h>
1814+
1815+/**
1816+ * DOC: overview
1817+ *
1818+ * A graphics device might be supported by different drivers, but only one
1819+ * driver can be active at any given time. Many systems load a generic
1820+ * graphics drivers, such as EFI-GOP or VESA, early during the boot process.
1821+ * During later boot stages, they replace the generic driver with a dedicated,
1822+ * hardware-specific driver. To take over the device the dedicated driver
1823+ * first has to remove the generic driver. DRM aperture functions manage
1824+ * ownership of DRM framebuffer memory and hand-over between drivers.
1825+ *
1826+ * DRM drivers should call drm_aperture_remove_conflicting_framebuffers()
1827+ * at the top of their probe function. The function removes any generic
1828+ * driver that is currently associated with the given framebuffer memory.
1829+ * If the framebuffer is located at PCI BAR 0, the rsp code looks as in the
1830+ * example given below.
1831+ *
1832+ * .. code-block:: c
1833+ *
1834+ *	static const struct drm_driver example_driver = {
1835+ *		...
1836+ *	};
1837+ *
1838+ *	static int remove_conflicting_framebuffers(struct pci_dev *pdev)
1839+ *	{
1840+ *		bool primary = false;
1841+ *		resource_size_t base, size;
1842+ *		int ret;
1843+ *
1844+ *		base = pci_resource_start(pdev, 0);
1845+ *		size = pci_resource_len(pdev, 0);
1846+ *	#ifdef CONFIG_X86
1847+ *		primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1848+ *	#endif
1849+ *
1850+ *		return drm_aperture_remove_conflicting_framebuffers(base, size, primary,
1851+ *		                                                    &example_driver);
1852+ *	}
1853+ *
1854+ *	static int probe(struct pci_dev *pdev)
1855+ *	{
1856+ *		int ret;
1857+ *
1858+ *		// Remove any generic drivers...
1859+ *		ret = remove_conflicting_framebuffers(pdev);
1860+ *		if (ret)
1861+ *			return ret;
1862+ *
1863+ *		// ... and initialize the hardware.
1864+ *		...
1865+ *
1866+ *		drm_dev_register();
1867+ *
1868+ *		return 0;
1869+ *	}
1870+ *
1871+ * PCI device drivers should call
1872+ * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the
1873+ * framebuffer apertures automatically. Device drivers without knowledge of
1874+ * the framebuffer's location shall call drm_aperture_remove_framebuffers(),
1875+ * which removes all drivers for known framebuffer.
1876+ *
1877+ * Drivers that are susceptible to being removed by other drivers, such as
1878+ * generic EFI or VESA drivers, have to register themselves as owners of their
1879+ * given framebuffer memory. Ownership of the framebuffer memory is achieved
1880+ * by calling devm_aperture_acquire_from_firmware(). On success, the driver
1881+ * is the owner of the framebuffer range. The function fails if the
1882+ * framebuffer is already by another driver. See below for an example.
1883+ *
1884+ * .. code-block:: c
1885+ *
1886+ *	static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev)
1887+ *	{
1888+ *		resource_size_t base, size;
1889+ *
1890+ *		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1891+ *		if (!mem)
1892+ *			return -EINVAL;
1893+ *		base = mem->start;
1894+ *		size = resource_size(mem);
1895+ *
1896+ *		return devm_acquire_aperture_from_firmware(dev, base, size);
1897+ *	}
1898+ *
1899+ *	static int probe(struct platform_device *pdev)
1900+ *	{
1901+ *		struct drm_device *dev;
1902+ *		int ret;
1903+ *
1904+ *		// ... Initialize the device...
1905+ *		dev = devm_drm_dev_alloc();
1906+ *		...
1907+ *
1908+ *		// ... and acquire ownership of the framebuffer.
1909+ *		ret = acquire_framebuffers(dev, pdev);
1910+ *		if (ret)
1911+ *			return ret;
1912+ *
1913+ *		drm_dev_register(dev, 0);
1914+ *
1915+ *		return 0;
1916+ *	}
1917+ *
1918+ * The generic driver is now subject to forced removal by other drivers. This
1919+ * only works for platform drivers that support hot unplug.
1920+ * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al
1921+ * for the registered framebuffer range, the aperture helpers call
1922+ * platform_device_unregister() and the generic driver unloads itself. It
1923+ * may not access the device's registers, framebuffer memory, ROM, etc
1924+ * afterwards.
1925+ */
1926+
1927+struct drm_aperture {
1928+	struct drm_device *dev;
1929+	resource_size_t base;
1930+	resource_size_t size;
1931+	struct list_head lh;
1932+	void (*detach)(struct drm_device *dev);
1933+};
1934+
1935+static LIST_HEAD(drm_apertures);
1936+static DEFINE_MUTEX(drm_apertures_lock);
1937+
1938+static bool overlap(resource_size_t base1, resource_size_t end1,
1939+		    resource_size_t base2, resource_size_t end2)
1940+{
1941+	return (base1 < end2) && (end1 > base2);
1942+}
1943+
1944+static void devm_aperture_acquire_release(void *data)
1945+{
1946+	struct drm_aperture *ap = data;
1947+	bool detached = !ap->dev;
1948+
1949+	if (detached)
1950+		return;
1951+
1952+	mutex_lock(&drm_apertures_lock);
1953+	list_del(&ap->lh);
1954+	mutex_unlock(&drm_apertures_lock);
1955+}
1956+
1957+static int devm_aperture_acquire(struct drm_device *dev,
1958+				 resource_size_t base, resource_size_t size,
1959+				 void (*detach)(struct drm_device *))
1960+{
1961+	size_t end = base + size;
1962+	struct list_head *pos;
1963+	struct drm_aperture *ap;
1964+
1965+	mutex_lock(&drm_apertures_lock);
1966+
1967+	list_for_each(pos, &drm_apertures) {
1968+		ap = container_of(pos, struct drm_aperture, lh);
1969+		if (overlap(base, end, ap->base, ap->base + ap->size)) {
1970+			mutex_unlock(&drm_apertures_lock);
1971+			return -EBUSY;
1972+		}
1973+	}
1974+
1975+	ap = devm_kzalloc(dev->dev, sizeof(*ap), GFP_KERNEL);
1976+	if (!ap) {
1977+		mutex_unlock(&drm_apertures_lock);
1978+		return -ENOMEM;
1979+	}
1980+
1981+	ap->dev = dev;
1982+	ap->base = base;
1983+	ap->size = size;
1984+	ap->detach = detach;
1985+	INIT_LIST_HEAD(&ap->lh);
1986+
1987+	list_add(&ap->lh, &drm_apertures);
1988+
1989+	mutex_unlock(&drm_apertures_lock);
1990+
1991+	return devm_add_action_or_reset(dev->dev, devm_aperture_acquire_release, ap);
1992+}
1993+
/* Detach callback for firmware framebuffers: unregister the platform device. */
static void drm_aperture_detach_firmware(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);

	/*
	 * Remove the device from the device hierarchy. This is the right thing
	 * to do for firmware-based DRM drivers, such as EFI, VESA or VGA. After
	 * the new driver takes over the hardware, the firmware device's state
	 * will be lost.
	 *
	 * For non-platform devices, a new callback would be required.
	 *
	 * If the aperture helpers ever need to handle native drivers, this call
	 * would only have to unplug the DRM device, so that the hardware device
	 * stays around after detachment.
	 */
	platform_device_unregister(pdev);
}
2012+
/**
 * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer
 *                                       on behalf of a DRM driver.
 * @dev:	the DRM device to own the framebuffer memory
 * @base:	the framebuffer's byte offset in physical memory
 * @size:	the framebuffer size in bytes
 *
 * Installs the given device as the new owner of the framebuffer. The function
 * expects the framebuffer to be provided by a platform device that has been
 * set up by firmware. Firmware can be any generic interface, such as EFI,
 * VESA, VGA, etc. If the native hardware driver takes over ownership of the
 * framebuffer range, the firmware state gets lost. Aperture helpers will then
 * unregister the platform device automatically. Acquired apertures are
 * released automatically if the underlying device goes away.
 *
 * The function fails if the framebuffer range, or parts of it, is currently
 * owned by another driver. To evict current owners, callers should use
 * drm_aperture_remove_conflicting_framebuffers() et al. before calling this
 * function. The function also fails if the given device is not a platform
 * device.
 *
 * Returns:
 * 0 on success, or a negative errno value otherwise.
 */
int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
					resource_size_t size)
{
	/* Only platform devices can be unregistered when a driver takes over. */
	if (drm_WARN_ON(dev, !dev_is_platform(dev->dev)))
		return -EINVAL;

	return devm_aperture_acquire(dev, base, size, drm_aperture_detach_firmware);
}
EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
2046+
2047+static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t size)
2048+{
2049+	resource_size_t end = base + size;
2050+	struct list_head *pos, *n;
2051+
2052+	mutex_lock(&drm_apertures_lock);
2053+
2054+	list_for_each_safe(pos, n, &drm_apertures) {
2055+		struct drm_aperture *ap =
2056+			container_of(pos, struct drm_aperture, lh);
2057+		struct drm_device *dev = ap->dev;
2058+
2059+		if (WARN_ON_ONCE(!dev))
2060+			continue;
2061+
2062+		if (!overlap(base, end, ap->base, ap->base + ap->size))
2063+			continue;
2064+
2065+		ap->dev = NULL; /* detach from device */
2066+		list_del(&ap->lh);
2067+
2068+		ap->detach(dev);
2069+	}
2070+
2071+	mutex_unlock(&drm_apertures_lock);
2072+}
2073+
/**
 * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
 * @base: the aperture's base address in physical memory
 * @size: aperture size in bytes
 * @primary: also kick vga16fb if present
 * @req_driver: requesting DRM driver
 *
 * This function removes graphics device drivers which use memory range described by
 * @base and @size.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise
 */
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
						 bool primary, const struct drm_driver *req_driver)
{
#if IS_REACHABLE(CONFIG_FB)
	/* First evict fbdev drivers that claim the range. */
	struct apertures_struct *a;
	int ret;

	a = alloc_apertures(1);
	if (!a)
		return -ENOMEM;

	a->ranges[0].base = base;
	a->ranges[0].size = size;

	ret = remove_conflicting_framebuffers(a, req_driver->name, primary);
	kfree(a);

	if (ret)
		return ret;
#endif

	/* Then detach any generic DRM drivers registered via the aperture list. */
	drm_aperture_detach_drivers(base, size);

	return 0;
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
2113+
/**
 * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices
 * @pdev: PCI device
 * @req_driver: requesting DRM driver
 *
 * This function removes graphics device drivers using memory range configured
 * for any of @pdev's memory bars. The function assumes that PCI device with
 * shadowed ROM drives a primary display and so kicks out vga16fb.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise
 */
int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
						     const struct drm_driver *req_driver)
{
	resource_size_t base, size;
	int bar, ret = 0;

	/* Detach generic DRM drivers bound to any of this device's memory BARs. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
		if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
			continue;
		base = pci_resource_start(pdev, bar);
		size = pci_resource_len(pdev, bar);
		drm_aperture_detach_drivers(base, size);
	}

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
#if IS_REACHABLE(CONFIG_FB)
	ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
#endif
	if (ret == 0)
		ret = vga_remove_vgacon(pdev);
	return ret;
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
2152diff -Naur a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
2153--- a/drivers/gpu/drm/drm_connector.c	2022-12-19 17:13:12.593517982 +0800
2154+++ b/drivers/gpu/drm/drm_connector.c	2023-02-23 17:02:04.951750958 +0800
2155@@ -2144,6 +2144,55 @@
2156 EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
2157
/**
 * drm_connector_attach_hdr_output_metadata_property - attach "HDR_OUTPUT_METADATA" property
 * @connector: connector to attach the property on.
 *
 * This is used to allow the userspace to send HDR Metadata to the
 * driver.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	/* Property object is created once per device in the mode_config setup. */
	struct drm_property *prop = dev->mode_config.hdr_output_metadata_property;

	drm_object_attach_property(&connector->base, prop, 0);

	return 0;
}
EXPORT_SYMBOL(drm_connector_attach_hdr_output_metadata_property);
2178+
2179+/**
2180+ * drm_connector_atomic_hdr_metadata_equal - checks if the hdr metadata changed
2181+ * @old_state: old connector state to compare
2182+ * @new_state: new connector state to compare
2183+ *
2184+ * This is used by HDR-enabled drivers to test whether the HDR metadata
2185+ * have changed between two different connector state (and thus probably
2186+ * requires a full blown mode change).
2187+ *
2188+ * Returns:
2189+ * True if the metadata are equal, False otherwise
2190+ */
2191+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
2192+					     struct drm_connector_state *new_state)
2193+{
2194+	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
2195+	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
2196+
2197+	if (!old_blob || !new_blob)
2198+		return old_blob == new_blob;
2199+
2200+	if (old_blob->length != new_blob->length)
2201+		return false;
2202+
2203+	return !memcmp(old_blob->data, new_blob->data, old_blob->length);
2204+}
2205+EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
2206+
2207+/**
2208  * drm_connector_set_vrr_capable_property - sets the variable refresh rate
2209  * capable property for a connector
2210  * @connector: drm connector
2211diff -Naur a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
2212--- a/drivers/gpu/drm/drm_ioctl.c	2022-12-19 17:13:12.597518030 +0800
2213+++ b/drivers/gpu/drm/drm_ioctl.c	2023-02-23 17:02:04.955751014 +0800
2214@@ -678,9 +678,9 @@
2215 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
2216 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
2217 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
2218-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
2219-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
2220-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
2221+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW),
2222+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW),
2223+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW),
2224 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
2225 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
2226 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
2227diff -Naur a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
2228--- a/drivers/gpu/drm/drm_plane_helper.c	2022-12-19 17:13:12.601518079 +0800
2229+++ b/drivers/gpu/drm/drm_plane_helper.c	2023-02-23 17:02:04.955751014 +0800
2230@@ -123,6 +123,7 @@
2231 		.crtc_w = drm_rect_width(dst),
2232 		.crtc_h = drm_rect_height(dst),
2233 		.rotation = rotation,
2234+		.visible = *visible,
2235 	};
2236 	struct drm_crtc_state crtc_state = {
2237 		.crtc = crtc,
2238diff -Naur a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
2239--- a/drivers/gpu/drm/drm_vblank.c	2022-12-19 17:13:12.601518079 +0800
2240+++ b/drivers/gpu/drm/drm_vblank.c	2023-02-23 17:02:04.955751014 +0800
2241@@ -1725,6 +1725,15 @@
2242 	reply->tval_usec = ts.tv_nsec / 1000;
2243 }
2244
/*
 * vblank waits are supported when vblank handling was initialized for the
 * device; legacy (non-KMS) drivers instead rely on the raw irq_enabled flag.
 */
static bool drm_wait_vblank_supported(struct drm_device *dev)
{
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
		return dev->irq_enabled;
#endif
	return drm_dev_has_vblank(dev);
}
2253+
2254 int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
2255 			  struct drm_file *file_priv)
2256 {
2257@@ -1736,7 +1745,7 @@
2258 	unsigned int pipe_index;
2259 	unsigned int flags, pipe, high_pipe;
2260
2261-	if (!dev->irq_enabled)
2262+	if (!drm_wait_vblank_supported(dev))
2263 		return -EOPNOTSUPP;
2264
2265 	if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
2266@@ -2011,7 +2020,7 @@
2267 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
2268 		return -EOPNOTSUPP;
2269
2270-	if (!dev->irq_enabled)
2271+	if (!drm_dev_has_vblank(dev))
2272 		return -EOPNOTSUPP;
2273
2274 	crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
2275@@ -2070,7 +2079,7 @@
2276 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
2277 		return -EOPNOTSUPP;
2278
2279-	if (!dev->irq_enabled)
2280+	if (!drm_dev_has_vblank(dev))
2281 		return -EOPNOTSUPP;
2282
2283 	crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
2284diff -Naur a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
2285--- a/drivers/gpu/drm/Makefile	2022-12-19 17:13:11.981510649 +0800
2286+++ b/drivers/gpu/drm/Makefile	2023-02-23 17:02:04.955751014 +0800
2287@@ -18,7 +18,7 @@
2288 		drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
2289 		drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
2290 		drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
2291-		drm_managed.o drm_vblank_work.o
2292+		drm_managed.o drm_vblank_work.o drm_aperture.o
2293
2294 drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
2295 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
2296diff -Naur a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
2297--- a/drivers/gpu/drm/meson/Kconfig	2022-12-19 17:13:12.677518989 +0800
2298+++ b/drivers/gpu/drm/meson/Kconfig	2023-02-23 17:02:04.955751014 +0800
2299@@ -6,9 +6,11 @@
2300 	select DRM_KMS_HELPER
2301 	select DRM_KMS_CMA_HELPER
2302 	select DRM_GEM_CMA_HELPER
2303+	select DRM_DISPLAY_CONNECTOR
2304 	select VIDEOMODE_HELPERS
2305 	select REGMAP_MMIO
2306 	select MESON_CANVAS
2307+	select CEC_CORE if CEC_NOTIFIER
2308
2309 config DRM_MESON_DW_HDMI
2310 	tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
2311@@ -16,3 +18,10 @@
2312 	default y if DRM_MESON
2313 	select DRM_DW_HDMI
2314 	imply DRM_DW_HDMI_I2S_AUDIO
2315+
2316+config DRM_MESON_DW_MIPI_DSI
2317+	tristate "MIPI DSI Synopsys Controller support for Amlogic Meson Display"
2318+	depends on DRM_MESON
2319+	default y if DRM_MESON
2320+	select DRM_DW_MIPI_DSI
2321+	select GENERIC_PHY_MIPI_DPHY
2322diff -Naur a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
2323--- a/drivers/gpu/drm/meson/Makefile	2022-12-19 17:13:12.677518989 +0800
2324+++ b/drivers/gpu/drm/meson/Makefile	2023-02-23 17:02:04.955751014 +0800
2325@@ -1,7 +1,9 @@
2326 # SPDX-License-Identifier: GPL-2.0-only
2327-meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
2328+meson-drm-y := meson_drv.o meson_plane.o meson_cursor.o meson_crtc.o meson_encoder_cvbs.o
2329 meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
2330 meson-drm-y += meson_rdma.o meson_osd_afbcd.o
2331+meson-drm-y += meson_encoder_hdmi.o meson_encoder_dsi.o
2332
2333 obj-$(CONFIG_DRM_MESON) += meson-drm.o
2334 obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
2335+obj-$(CONFIG_DRM_MESON_DW_MIPI_DSI) += meson_dw_mipi_dsi.o
2336diff -Naur a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
2337--- a/drivers/gpu/drm/meson/meson_crtc.c	2022-12-19 17:13:12.677518989 +0800
2338+++ b/drivers/gpu/drm/meson/meson_crtc.c	2023-02-23 17:02:04.955751014 +0800
2339@@ -36,6 +36,7 @@
2340 	struct drm_pending_vblank_event *event;
2341 	struct meson_drm *priv;
2342 	void (*enable_osd1)(struct meson_drm *priv);
2343+	void (*enable_osd2)(struct meson_drm *priv);
2344 	void (*enable_vd1)(struct meson_drm *priv);
2345 	void (*enable_osd1_afbc)(struct meson_drm *priv);
2346 	void (*disable_osd1_afbc)(struct meson_drm *priv);
2347@@ -45,6 +46,10 @@
2348 };
2349 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
2350
2351+static bool s_force_commit = false;
2352+static bool s_force_osd1_disable = false;
2353+static bool s_force_video_zorder_up = false;
2354+
2355 /* CRTC */
2356
2357 static int meson_crtc_enable_vblank(struct drm_crtc *crtc)
2358@@ -81,8 +86,11 @@
2359
2360 };
2361
2362+struct drm_display_mode meson_display_mode = { 0 };
2363+EXPORT_SYMBOL(meson_display_mode);
2364+
2365 static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
2366-					  struct drm_crtc_state *old_state)
2367+					  struct drm_crtc_state *old_crtc_state)
2368 {
2369 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
2370 	struct drm_crtc_state *crtc_state = crtc->state;
2371@@ -110,15 +118,31 @@
2372 	writel_relaxed(0 << 16 |
2373 			(crtc_state->mode.vdisplay - 1),
2374 			priv->io_base + _REG(VPP_OSD1_BLD_V_SCOPE));
2375+	writel_relaxed(0 << 16 |
2376+			(crtc_state->mode.hdisplay - 1),
2377+			priv->io_base + _REG(VPP_OSD2_BLD_H_SCOPE));
2378+	writel_relaxed(0 << 16 |
2379+			(crtc_state->mode.vdisplay - 1),
2380+			priv->io_base + _REG(VPP_OSD2_BLD_V_SCOPE));
2381+	writel_relaxed(crtc_state->mode.hdisplay |
2382+			crtc_state->mode.vdisplay << 16,
2383+		       priv->io_base +
2384+		       _REG(VIU_OSD_BLEND_BLEND0_SIZE));
2385+	writel_relaxed(crtc_state->mode.hdisplay |
2386+			crtc_state->mode.vdisplay << 16,
2387+		       priv->io_base +
2388+		       _REG(VIU_OSD_BLEND_BLEND1_SIZE));
2389 	writel_relaxed(crtc_state->mode.hdisplay << 16 |
2390 			crtc_state->mode.vdisplay,
2391 			priv->io_base + _REG(VPP_OUT_H_V_SIZE));
2392
2393 	drm_crtc_vblank_on(crtc);
2394+
2395+	memcpy(&meson_display_mode, &crtc_state->mode, sizeof(meson_display_mode));
2396 }
2397
2398 static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
2399-				     struct drm_crtc_state *old_state)
2400+				     struct drm_crtc_state *old_crtc_state)
2401 {
2402 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
2403 	struct drm_crtc_state *crtc_state = crtc->state;
2404@@ -146,7 +170,7 @@
2405 }
2406
2407 static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
2408-					   struct drm_crtc_state *old_state)
2409+					   struct drm_crtc_state *old_crtc_state)
2410 {
2411 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
2412 	struct meson_drm *priv = meson_crtc->priv;
2413@@ -158,6 +182,9 @@
2414 	priv->viu.osd1_enabled = false;
2415 	priv->viu.osd1_commit = false;
2416
2417+	priv->viu.osd2_enabled = false;
2418+	priv->viu.osd2_commit = false;
2419+
2420 	priv->viu.vd1_enabled = false;
2421 	priv->viu.vd1_commit = false;
2422
2423@@ -171,7 +198,7 @@
2424 }
2425
2426 static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
2427-				      struct drm_crtc_state *old_state)
2428+				      struct drm_crtc_state *old_crtc_state)
2429 {
2430 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
2431 	struct meson_drm *priv = meson_crtc->priv;
2432@@ -183,11 +210,14 @@
2433 	priv->viu.osd1_enabled = false;
2434 	priv->viu.osd1_commit = false;
2435
2436+	priv->viu.osd2_enabled = false;
2437+	priv->viu.osd2_commit = false;
2438+
2439 	priv->viu.vd1_enabled = false;
2440 	priv->viu.vd1_commit = false;
2441
2442 	/* Disable VPP Postblend */
2443-	writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
2444+	writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND | VPP_VD1_POSTBLEND |
2445 			    VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
2446 			    priv->io_base + _REG(VPP_MISC));
2447
2448@@ -201,7 +231,7 @@
2449 }
2450
2451 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
2452-				    struct drm_crtc_state *state)
2453+				    struct drm_crtc_state *old_crtc_state)
2454 {
2455 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
2456 	unsigned long flags;
2457@@ -223,6 +253,7 @@
2458 	struct meson_drm *priv = meson_crtc->priv;
2459
2460 	priv->viu.osd1_commit = true;
2461+	priv->viu.osd2_commit = true;
2462 	priv->viu.vd1_commit = true;
2463 }
2464
2465@@ -246,6 +277,12 @@
2466 			    priv->io_base + _REG(VPP_MISC));
2467 }
2468
/* Pre-G12A SoCs: route OSD2 into the VPP postblend stage. */
static void meson_crtc_enable_osd2(struct meson_drm *priv)
{
	writel_bits_relaxed(VPP_OSD2_POSTBLEND, VPP_OSD2_POSTBLEND,
			    priv->io_base + _REG(VPP_MISC));
}
2474+
2475 static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv)
2476 {
2477 	writel_relaxed(priv->viu.osd1_blk2_cfg4,
2478@@ -274,14 +311,25 @@
2479 	writel_relaxed(priv->viu.osd_blend_din0_scope_v,
2480 		       priv->io_base +
2481 		       _REG(VIU_OSD_BLEND_DIN0_SCOPE_V));
2482-	writel_relaxed(priv->viu.osb_blend0_size,
2483+	if (s_force_video_zorder_up) {
2484+		writel_bits_relaxed(0xF << 8, OSD_BLEND_POSTBLD_SRC_VD1,
2485+				priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
2486+	} else {
2487+		writel_bits_relaxed(OSD_BLEND_POSTBLD_SRC_OSD1, OSD_BLEND_POSTBLD_SRC_OSD1,
2488+				priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
2489+	}
2490+}
2491+
2492+static void meson_g12a_crtc_enable_osd2(struct meson_drm *priv)
2493+{
2494+	writel_relaxed(priv->viu.osd_blend_din3_scope_h,
2495 		       priv->io_base +
2496-		       _REG(VIU_OSD_BLEND_BLEND0_SIZE));
2497-	writel_relaxed(priv->viu.osb_blend1_size,
2498+		       _REG(VIU_OSD_BLEND_DIN1_SCOPE_H));
2499+	writel_relaxed(priv->viu.osd_blend_din3_scope_v,
2500 		       priv->io_base +
2501-		       _REG(VIU_OSD_BLEND_BLEND1_SIZE));
2502-	writel_bits_relaxed(3 << 8, 3 << 8,
2503-			    priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
2504+		       _REG(VIU_OSD_BLEND_DIN1_SCOPE_V));
2505+	writel_bits_relaxed(OSD_BLEND_POSTBLD_SRC_OSD2, OSD_BLEND_POSTBLD_SRC_OSD2,
2506+			    priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
2507 }
2508
2509 static void meson_crtc_enable_vd1(struct meson_drm *priv)
2510@@ -315,8 +363,22 @@
2511 	struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
2512 	unsigned long flags;
2513
2514+	if (s_force_commit) {
2515+		s_force_commit = false;
2516+		priv->viu.osd1_commit = true;
2517+		if (s_force_osd1_disable) {
2518+			writel_bits_relaxed(OSD_BLEND_POSTBLD_SRC_OSD1, 0,
2519+					priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
2520+		} else if (s_force_video_zorder_up) {
2521+			writel_bits_relaxed(0xF << 8, OSD_BLEND_POSTBLD_SRC_VD1,
2522+					priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); // OSD1 postblend src -> vd1
2523+			writel_bits_relaxed(0xF << 8, OSD_BLEND_POSTBLD_SRC_OSD1,
2524+					priv->io_base + _REG(VD1_BLEND_SRC_CTRL)); // VD1 postblend src -> osd1
2525+		}
2526+	}
2527+
2528 	/* Update the OSD registers */
2529-	if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
2530+	if (!s_force_osd1_disable && priv->viu.osd1_enabled && priv->viu.osd1_commit) {
2531 		writel_relaxed(priv->viu.osd1_ctrl_stat,
2532 				priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
2533 		writel_relaxed(priv->viu.osd1_ctrl_stat2,
2534@@ -388,6 +450,43 @@
2535 		priv->viu.osd1_commit = false;
2536 	}
2537
2538+	if (priv->viu.osd2_enabled && priv->viu.osd2_commit) {
2539+		writel_relaxed(priv->viu.osd2_ctrl_stat,
2540+				priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
2541+		writel_relaxed(priv->viu.osd2_ctrl_stat2,
2542+				priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
2543+		writel_relaxed(priv->viu.osd2_blk0_cfg[0],
2544+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W0));
2545+		writel_relaxed(priv->viu.osd2_blk0_cfg[1],
2546+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W1));
2547+		writel_relaxed(priv->viu.osd2_blk0_cfg[2],
2548+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W2));
2549+		writel_relaxed(priv->viu.osd2_blk0_cfg[3],
2550+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W3));
2551+		writel_relaxed(priv->viu.osd2_blk0_cfg[4],
2552+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W4));
2553+
2554+		/* vsync forced to update INTERLACE_SEL_ODD in interlace mode */
2555+		meson_crtc->vsync_forced = priv->viu.osd2_interlace;
2556+
2557+		meson_canvas_config(priv->canvas, priv->canvas_id_osd2,
2558+				priv->viu.osd2_addr,
2559+				priv->viu.osd2_stride,
2560+				priv->viu.osd2_height,
2561+				MESON_CANVAS_WRAP_NONE,
2562+				MESON_CANVAS_BLKMODE_LINEAR, 0);
2563+
2564+		/* Enable OSD2 */
2565+		if (meson_crtc->enable_osd2)
2566+			meson_crtc->enable_osd2(priv);
2567+
2568+		priv->viu.osd2_commit = false;
2569+	} else if (priv->viu.osd2_enabled && priv->viu.osd2_interlace) {
2570+		u32 reg = readl_relaxed(priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W0)) & ~BIT(0);
2571+		writel_relaxed(reg | meson_venci_get_field(priv) ? 1 : 0,
2572+				priv->io_base + _REG(VIU_OSD2_BLK0_CFG_W0));
2573+	}
2574+
2575 	/* Update the VD1 registers */
2576 	if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
2577
2578@@ -671,12 +770,77 @@
2579 	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
2580 }
2581
2582+static ssize_t enable_osd_store(struct class *class,
2583+		struct class_attribute *attr,
2584+		const char *buf, size_t count)
2585+{
2586+	pr_info("enable_osd_store: %s\n", buf);
2587+	int osd1_enabled = 1;
2588+	if (1 != sscanf(buf, "%d", &osd1_enabled)) {
2589+		return 0;
2590+	}
2591+	s_force_osd1_disable = !osd1_enabled;
2592+	s_force_commit = true;
2593+	pr_info("s_force_osd1_disable=%d\n", s_force_osd1_disable);
2594+	return count;
2595+}
2596+
2597+static ssize_t enable_osd_show(struct class *class,
2598+		struct class_attribute *attr,
2599+		char *buf)
2600+{
2601+	return sprintf(buf, "%d\n", s_force_osd1_disable ? 0 : 1);
2602+}
2603+
2604+static ssize_t video_zorder_up_store(struct class *class,
2605+		struct class_attribute *attr,
2606+		const char *buf, size_t count)
2607+{
2608+	pr_info("enable_osd_store: %s\n", buf);
2609+	int video_zorder_up = 0;
2610+	if (1 != sscanf(buf, "%d", &video_zorder_up)) {
2611+		return 0;
2612+	}
2613+	s_force_video_zorder_up = video_zorder_up;
2614+	s_force_commit = true;
2615+	pr_info("s_force_video_zorder_up=%d\n", s_force_video_zorder_up);
2616+	return count;
2617+}
2618+
2619+static ssize_t video_zorder_up_show(struct class *class,
2620+		struct class_attribute *attr,
2621+		char *buf)
2622+{
2623+	return sprintf(buf, "%d\n", s_force_video_zorder_up ? 1 : 0);
2624+}
2625+
static CLASS_ATTR_RW(enable_osd);
static CLASS_ATTR_RW(video_zorder_up);

/* Debug knobs exposed under /sys/class/meson_crtc/. */
static struct attribute *meson_crtc_class_attrs[] = {
	&class_attr_enable_osd.attr,
	&class_attr_video_zorder_up.attr,
	NULL
};

ATTRIBUTE_GROUPS(meson_crtc_class);
static struct class meson_crtc_class = {
	.name = "meson_crtc",
	.class_groups = meson_crtc_class_groups,
};
2641+
2642+
2643 int meson_crtc_create(struct meson_drm *priv)
2644 {
2645 	struct meson_crtc *meson_crtc;
2646 	struct drm_crtc *crtc;
2647 	int ret;
2648
2649+	ret = class_register(&meson_crtc_class);
2650+	if (ret < 0)
2651+		return ret;
2652+
2653 	meson_crtc = devm_kzalloc(priv->drm->dev, sizeof(*meson_crtc),
2654 				  GFP_KERNEL);
2655 	if (!meson_crtc)
2656@@ -685,7 +849,7 @@
2657 	meson_crtc->priv = priv;
2658 	crtc = &meson_crtc->base;
2659 	ret = drm_crtc_init_with_planes(priv->drm, crtc,
2660-					priv->primary_plane, NULL,
2661+					priv->primary_plane, priv->cursor_plane,
2662 					&meson_crtc_funcs, "meson_crtc");
2663 	if (ret) {
2664 		dev_err(priv->drm->dev, "Failed to init CRTC\n");
2665@@ -694,6 +858,7 @@
2666
2667 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
2668 		meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
2669+		meson_crtc->enable_osd2 = meson_g12a_crtc_enable_osd2;
2670 		meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
2671 		meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
2672 		meson_crtc->enable_osd1_afbc =
2673@@ -703,6 +868,7 @@
2674 		drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
2675 	} else {
2676 		meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
2677+		meson_crtc->enable_osd2 = meson_crtc_enable_osd2;
2678 		meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
2679 		if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
2680 			meson_crtc->enable_osd1_afbc =
2681diff -Naur a/drivers/gpu/drm/meson/meson_cursor.c b/drivers/gpu/drm/meson/meson_cursor.c
2682--- a/drivers/gpu/drm/meson/meson_cursor.c	1970-01-01 08:00:00.000000000 +0800
2683+++ b/drivers/gpu/drm/meson/meson_cursor.c	2023-02-23 17:02:04.955751014 +0800
2684@@ -0,0 +1,244 @@
2685+// SPDX-License-Identifier: GPL-2.0-or-later
2686+/*
2687+ * Copyright (C) 2021 BayLibre, SAS
2688+ * Author: Neil Armstrong <narmstrong@baylibre.com>
2689+ */
2690+
2691+#include <linux/bitfield.h>
2692+
2693+#include <drm/drm_atomic.h>
2694+#include <drm/drm_atomic_helper.h>
2695+#include <drm/drm_device.h>
2696+#include <drm/drm_fb_cma_helper.h>
2697+#include <drm/drm_fourcc.h>
2698+#include <drm/drm_gem_cma_helper.h>
2699+#include <drm/drm_gem_framebuffer_helper.h>
2700+#include <drm/drm_plane_helper.h>
2701+
2702+#include "meson_cursor.h"
2703+#include "meson_registers.h"
2704+#include "meson_viu.h"
2705+
/* Cursor plane (backed by the OSD2 hardware layer) and its driver back-pointer. */
struct meson_cursor {
	struct drm_plane base;
	struct meson_drm *priv;
};
#define to_meson_cursor(x) container_of(x, struct meson_cursor, base)
2712+static int meson_cursor_atomic_check(struct drm_plane *plane,
2713+				    struct drm_plane_state *state)
2714+{
2715+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state->state,
2716+										 plane);
2717+	struct drm_crtc_state *crtc_state;
2718+
2719+	if (!new_plane_state->crtc)
2720+		return 0;
2721+
2722+	crtc_state = drm_atomic_get_crtc_state(state->state,
2723+					       new_plane_state->crtc);
2724+	if (IS_ERR(crtc_state))
2725+		return PTR_ERR(crtc_state);
2726+
2727+	return drm_atomic_helper_check_plane_state(new_plane_state,
2728+						   crtc_state,
2729+						   DRM_PLANE_HELPER_NO_SCALING,
2730+						   DRM_PLANE_HELPER_NO_SCALING,
2731+						   true, true);
2732+}
2733+
/* Convert a signed 16.16 fixed-point value to its integer part. */
static inline int64_t fixed16_to_int(int64_t value)
{
	/* Arithmetic right shift on signed values (gcc/clang): floors toward -inf. */
	return value >> 16;
}
2739+
/*
 * Program the OSD2 layer to display the cursor framebuffer. Register values
 * are staged in priv->viu.osd2_* under the event lock; the vsync IRQ handler
 * writes them to the hardware (see meson_crtc's osd2 commit path).
 */
static void meson_cursor_atomic_update(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct meson_cursor *meson_cursor = to_meson_cursor(plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state->state,
									   plane);
	struct drm_rect dest = drm_plane_state_dest(new_state);
	struct meson_drm *priv = meson_cursor->priv;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_gem_cma_object *gem;
	unsigned long flags;
	int dst_w, dst_h;

	/*
	 * Update Coordinates
	 * Update Formats
	 * Update Buffer
	 * Enable Plane
	 */
	spin_lock_irqsave(&priv->drm->event_lock, flags);

	/* Enable OSD and BLK0, set max global alpha */
	priv->viu.osd2_ctrl_stat = OSD_ENABLE |
				   (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
				   OSD_BLK0_ENABLE;

	/* Start from the live register so unrelated bits are preserved. */
	priv->viu.osd2_ctrl_stat2 = readl(priv->io_base +
					  _REG(VIU_OSD2_CTRL_STAT2));

	/* Set up BLK0 to point to the right canvas */
	priv->viu.osd2_blk0_cfg[0] = priv->canvas_id_osd2 << OSD_CANVAS_SEL;
	priv->viu.osd2_blk0_cfg[0] |= OSD_ENDIANNESS_LE;

	/* On GXBB, Use the old non-HDR RGB2YUV converter */
	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
		priv->viu.osd2_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;

	/* Select the OSD block mode / color matrix for the fb format. */
	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		priv->viu.osd2_blk0_cfg[0] |= OSD_BLK_MODE_32 |
			OSD_COLOR_MATRIX_32_ARGB;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		priv->viu.osd2_blk0_cfg[0] |= OSD_BLK_MODE_32 |
			OSD_COLOR_MATRIX_32_ABGR;
		break;
	case DRM_FORMAT_RGB888:
		priv->viu.osd2_blk0_cfg[0] |= OSD_BLK_MODE_24 |
			OSD_COLOR_MATRIX_24_RGB;
		break;
	case DRM_FORMAT_RGB565:
		priv->viu.osd2_blk0_cfg[0] |= OSD_BLK_MODE_16 |
			OSD_COLOR_MATRIX_16_RGB565;
		break;
	}

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		/* For XRGB, replace the pixel's alpha by 0xFF */
		priv->viu.osd2_ctrl_stat2 |= OSD_REPLACE_EN;
		break;
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		/* For ARGB, use the pixel's alpha */
		priv->viu.osd2_ctrl_stat2 &= ~OSD_REPLACE_EN;
		break;
	}

	dst_w = new_state->crtc_w;
	dst_h = new_state->crtc_h;

	/* Interlaced output needs the field bit refreshed every vsync. */
	if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
		priv->viu.osd2_interlace = true;
	else
		priv->viu.osd2_interlace = false;

	/*
	 * The format of these registers is (x2 << 16 | x1),
	 * where x2 is exclusive.
	 * e.g. +30x1920 would be (1919 << 16) | 30
	 */
	priv->viu.osd2_blk0_cfg[1] =
				((fixed16_to_int(new_state->src.x2) - 1) << 16) |
				fixed16_to_int(new_state->src.x1);
	priv->viu.osd2_blk0_cfg[2] =
				((fixed16_to_int(new_state->src.y2) - 1) << 16) |
				fixed16_to_int(new_state->src.y1);
	priv->viu.osd2_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
	priv->viu.osd2_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;

	/* G12A routes OSD2 through the blender's DIN1/DIN3 scope registers. */
	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
		priv->viu.osd_blend_din3_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
		priv->viu.osd_blend_din3_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
		priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
	}

	/* Update Canvas with buffer address */
	gem = drm_fb_cma_get_gem_obj(fb, 0);

	priv->viu.osd2_addr = gem->paddr;
	priv->viu.osd2_stride = fb->pitches[0];
	priv->viu.osd2_height = fb->height;
	priv->viu.osd2_width = fb->width;

	/* TOFIX: Reset OSD2 before enabling it on GXL+ SoCs ? */

	priv->viu.osd2_enabled = true;

	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
2853+
2854+static void meson_cursor_atomic_disable(struct drm_plane *plane,
2855+				       struct drm_plane_state *state)
2856+{
2857+	struct meson_cursor *meson_cursor = to_meson_cursor(plane);
2858+	struct meson_drm *priv = meson_cursor->priv;
2859+
2860+	/* Disable OSD2 */
2861+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
2862+		writel_bits_relaxed(OSD_BLEND_POSTBLD_SRC_OSD2, 0,
2863+				    priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
2864+	else
2865+		writel_bits_relaxed(VPP_OSD2_POSTBLEND, 0,
2866+				    priv->io_base + _REG(VPP_MISC));
2867+
2868+	priv->viu.osd2_enabled = false;
2869+}
2870+
2871+static const struct drm_plane_helper_funcs meson_cursor_helper_funcs = {
2872+	.atomic_check	= meson_cursor_atomic_check,
2873+	.atomic_disable	= meson_cursor_atomic_disable,
2874+	.atomic_update	= meson_cursor_atomic_update,
2875+};
2876+
2877+static const struct drm_plane_funcs meson_cursor_funcs = {
2878+	.update_plane		= drm_atomic_helper_update_plane,
2879+	.disable_plane		= drm_atomic_helper_disable_plane,
2880+	.destroy		= drm_plane_cleanup,
2881+	.reset			= drm_atomic_helper_plane_reset,
2882+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
2883+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
2884+};
2885+
2886+static const uint32_t supported_drm_formats[] = {
2887+	DRM_FORMAT_ARGB8888,
2888+	DRM_FORMAT_ABGR8888,
2889+	DRM_FORMAT_XRGB8888,
2890+	DRM_FORMAT_XBGR8888,
2891+	DRM_FORMAT_RGB888,
2892+	DRM_FORMAT_RGB565,
2893+};
2894+
2895+static const uint64_t format_modifiers_default[] = {
2896+	DRM_FORMAT_MOD_LINEAR,
2897+	DRM_FORMAT_MOD_INVALID,
2898+};
2899+
2900+int meson_cursor_create(struct meson_drm *priv)
2901+{
2902+	struct meson_cursor *meson_cursor;
2903+	struct drm_plane *cursor;
2904+
2905+	meson_cursor = devm_kzalloc(priv->drm->dev, sizeof(*meson_cursor),
2906+				   GFP_KERNEL);
2907+	if (!meson_cursor)
2908+		return -ENOMEM;
2909+
2910+	meson_cursor->priv = priv;
2911+	cursor = &meson_cursor->base;
2912+
2913+	drm_universal_plane_init(priv->drm, cursor, 0xFF,
2914+				 &meson_cursor_funcs,
2915+				 supported_drm_formats,
2916+				 ARRAY_SIZE(supported_drm_formats),
2917+				 format_modifiers_default,
2918+				 DRM_PLANE_TYPE_CURSOR, "meson_cursor_plane");
2919+
2920+	drm_plane_helper_add(cursor, &meson_cursor_helper_funcs);
2921+
2922+	/* For now, OSD Cursor is always on top of the primary plane */
2923+	drm_plane_create_zpos_immutable_property(cursor, 2);
2924+
2925+	priv->cursor_plane = cursor;
2926+
2927+	return 0;
2928+}
2929diff -Naur a/drivers/gpu/drm/meson/meson_cursor.h b/drivers/gpu/drm/meson/meson_cursor.h
2930--- a/drivers/gpu/drm/meson/meson_cursor.h	1970-01-01 08:00:00.000000000 +0800
2931+++ b/drivers/gpu/drm/meson/meson_cursor.h	2023-02-23 17:02:04.955751014 +0800
2932@@ -0,0 +1,14 @@
2933+/* SPDX-License-Identifier: GPL-2.0-or-later */
2934+/*
2935+ * Copyright (C) 2021 BayLibre, SAS
2936+ * Author: Neil Armstrong <narmstrong@baylibre.com>
2937+ */
2938+
2939+#ifndef __MESON_CURSOR_H
2940+#define __MESON_CURSOR_H
2941+
2942+#include "meson_drv.h"
2943+
2944+int meson_cursor_create(struct meson_drm *priv);
2945+
2946+#endif /* __MESON_CURSOR_H */
2947diff -Naur a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
2948--- a/drivers/gpu/drm/meson/meson_drv.c	2023-11-30 21:03:09.419481221 +0800
2949+++ b/drivers/gpu/drm/meson/meson_drv.c	2023-12-04 09:42:14.153284521 +0800
2950@@ -15,6 +15,7 @@
2951 #include <linux/platform_device.h>
2952 #include <linux/soc/amlogic/meson-canvas.h>
2953
2954+#include <drm/drm_aperture.h>
2955 #include <drm/drm_atomic_helper.h>
2956 #include <drm/drm_drv.h>
2957 #include <drm/drm_fb_helper.h>
2958@@ -29,9 +30,12 @@
2959 #include "meson_drv.h"
2960 #include "meson_overlay.h"
2961 #include "meson_plane.h"
2962+#include "meson_cursor.h"
2963 #include "meson_osd_afbcd.h"
2964 #include "meson_registers.h"
2965-#include "meson_venc_cvbs.h"
2966+#include "meson_encoder_cvbs.h"
2967+#include "meson_encoder_hdmi.h"
2968+#include "meson_encoder_dsi.h"
2969 #include "meson_viu.h"
2970 #include "meson_vpp.h"
2971 #include "meson_rdma.h"
2972@@ -93,9 +97,6 @@
2973 static struct drm_driver meson_driver = {
2974 	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
2975
2976-	/* IRQ */
2977-	.irq_handler		= meson_irq,
2978-
2979 	/* CMA Ops */
2980 	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
2981
2982@@ -159,23 +160,6 @@
2983 	writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
2984 }
2985
2986-static void meson_remove_framebuffers(void)
2987-{
2988-	struct apertures_struct *ap;
2989-
2990-	ap = alloc_apertures(1);
2991-	if (!ap)
2992-		return;
2993-
2994-	/* The framebuffer can be located anywhere in RAM */
2995-	ap->ranges[0].base = 0;
2996-	ap->ranges[0].size = ~0;
2997-
2998-	drm_fb_helper_remove_conflicting_framebuffers(ap, "meson-drm-fb",
2999-						      false);
3000-	kfree(ap);
3001-}
3002-
3003 struct meson_drm_soc_attr {
3004 	struct meson_drm_soc_limits limits;
3005 	const struct soc_device_attribute *attrs;
3006@@ -229,8 +213,7 @@
3007 	priv->compat = match->compat;
3008 	priv->afbcd.ops = match->afbcd_ops;
3009
3010-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
3011-	regs = devm_ioremap_resource(dev, res);
3012+	regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
3013 	if (IS_ERR(regs)) {
3014 		ret = PTR_ERR(regs);
3015 		goto free_drm;
3016@@ -267,6 +250,9 @@
3017 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
3018 	if (ret)
3019 		goto free_drm;
3020+	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd2);
3021+	if (ret)
3022+		goto free_drm;
3023 	ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
3024 	if (ret) {
3025 		meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
3026@@ -300,8 +286,13 @@
3027 		}
3028 	}
3029
3030-	/* Remove early framebuffers (ie. simplefb) */
3031-	meson_remove_framebuffers();
3032+	/*
3033+	 * Remove early framebuffers (ie. simplefb). The framebuffer can be
3034+	 * located anywhere in RAM
3035+	 */
3036+	ret = drm_aperture_remove_framebuffers(false, &meson_driver);
3037+	if (ret)
3038+		goto free_drm;
3039
3040 	ret = drmm_mode_config_init(drm);
3041 	if (ret)
3042@@ -325,7 +316,7 @@
3043
3044 	/* Encoder Initialization */
3045
3046-	ret = meson_venc_cvbs_create(priv);
3047+	ret = meson_encoder_cvbs_init(priv);
3048 	if (ret)
3049 		goto exit_afbcd;
3050
3051@@ -337,6 +328,20 @@
3052 		}
3053 	}
3054
3055+	ret = meson_encoder_hdmi_init(priv);
3056+	if (ret)
3057+		goto exit_afbcd;
3058+
3059+	ret = meson_cursor_create(priv);
3060+	if (ret)
3061+		goto exit_afbcd;
3062+
3063+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
3064+		ret = meson_encoder_dsi_init(priv);
3065+		if (ret)
3066+			goto free_drm;
3067+	}
3068+
3069 	ret = meson_plane_create(priv);
3070 	if (ret)
3071 		goto unbind_all;
3072@@ -349,9 +354,9 @@
3073 	if (ret)
3074 		goto unbind_all;
3075
3076-	ret = drm_irq_install(drm, priv->vsync_irq);
3077-	if (ret)
3078-		goto unbind_all;
3079+	ret = request_irq(priv->vsync_irq, meson_irq, IRQF_SHARED, drm->driver->name, drm);
 3080+	if (ret)
3081+		goto exit_afbcd;
3082
3083 	drm_mode_config_reset(drm);
3084
3085@@ -368,7 +373,7 @@
3086 	return 0;
3087
3088 uninstall_irq:
3089-	drm_irq_uninstall(drm);
3090+	free_irq(priv->vsync_irq, drm);
3091 unbind_all:
3092 	if (has_components)
3093 		component_unbind_all(drm->dev, drm);
3094@@ -402,7 +407,7 @@
3095 	drm_kms_helper_poll_fini(drm);
3096 	drm_atomic_helper_shutdown(drm);
3097 	component_unbind_all(dev, drm);
3098-	drm_irq_uninstall(drm);
3099+	free_irq(priv->vsync_irq, drm);
3100 	drm_dev_put(drm);
3101
3102 	if (priv->afbcd.ops)
3103@@ -449,46 +454,6 @@
3104 	return dev->of_node == data;
3105 }
3106
3107-/* Possible connectors nodes to ignore */
3108-static const struct of_device_id connectors_match[] = {
3109-	{ .compatible = "composite-video-connector" },
3110-	{ .compatible = "svideo-connector" },
3111-	{ .compatible = "hdmi-connector" },
3112-	{ .compatible = "dvi-connector" },
3113-	{}
3114-};
3115-
3116-static int meson_probe_remote(struct platform_device *pdev,
3117-			      struct component_match **match,
3118-			      struct device_node *parent,
3119-			      struct device_node *remote)
3120-{
3121-	struct device_node *ep, *remote_node;
3122-	int count = 1;
3123-
3124-	/* If node is a connector, return and do not add to match table */
3125-	if (of_match_node(connectors_match, remote))
3126-		return 1;
3127-
3128-	component_match_add(&pdev->dev, match, compare_of, remote);
3129-
3130-	for_each_endpoint_of_node(remote, ep) {
3131-		remote_node = of_graph_get_remote_port_parent(ep);
3132-		if (!remote_node ||
3133-		    remote_node == parent || /* Ignore parent endpoint */
3134-		    !of_device_is_available(remote_node)) {
3135-			of_node_put(remote_node);
3136-			continue;
3137-		}
3138-
3139-		count += meson_probe_remote(pdev, match, remote, remote_node);
3140-
3141-		of_node_put(remote_node);
3142-	}
3143-
3144-	return count;
3145-}
3146-
3147 static void meson_drv_shutdown(struct platform_device *pdev)
3148 {
3149 	struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
3150@@ -500,6 +465,13 @@
3151 	drm_atomic_helper_shutdown(priv->drm);
3152 }
3153
3154+/* Possible connectors nodes to ignore */
3155+static const struct of_device_id connectors_match[] = {
3156+	{ .compatible = "composite-video-connector" },
3157+	{ .compatible = "svideo-connector" },
3158+	{}
3159+};
3160+
3161 static int meson_drv_probe(struct platform_device *pdev)
3162 {
3163 	struct component_match *match = NULL;
3164@@ -514,8 +486,21 @@
3165 			continue;
3166 		}
3167
3168-		count += meson_probe_remote(pdev, &match, np, remote);
3169+		/* If an analog connector is detected, count it as an output */
3170+		if (of_match_node(connectors_match, remote)) {
3171+			++count;
3172+			of_node_put(remote);
3173+			continue;
3174+		}
3175+
3176+		dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n",
3177+			np, remote, dev_name(&pdev->dev));
3178+
3179+		component_match_add(&pdev->dev, &match, compare_of, remote);
3180+
3181 		of_node_put(remote);
3182+
3183+		++count;
3184 	}
3185
3186 	if (count && !match)
3187diff -Naur a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
3188--- a/drivers/gpu/drm/meson/meson_drv.h	2022-12-19 17:13:12.677518989 +0800
3189+++ b/drivers/gpu/drm/meson/meson_drv.h	2023-02-23 17:02:04.955751014 +0800
3190@@ -43,12 +43,14 @@
3191
3192 	struct meson_canvas *canvas;
3193 	u8 canvas_id_osd1;
3194+	u8 canvas_id_osd2;
3195 	u8 canvas_id_vd1_0;
3196 	u8 canvas_id_vd1_1;
3197 	u8 canvas_id_vd1_2;
3198
3199 	struct drm_device *drm;
3200 	struct drm_crtc *crtc;
3201+	struct drm_plane *cursor_plane;
3202 	struct drm_plane *primary_plane;
3203 	struct drm_plane *overlay_plane;
3204
3205@@ -82,6 +84,21 @@
3206 		uint32_t osd_blend_din0_scope_h;
3207 		uint32_t osd_blend_din0_scope_v;
3208 		uint32_t osb_blend0_size;
3209+
3210+		bool osd2_enabled;
3211+		bool osd2_interlace;
3212+		bool osd2_commit;
3213+		uint32_t osd2_ctrl_stat;
3214+		uint32_t osd2_ctrl_stat2;
3215+		uint32_t osd2_blk0_cfg[5];
3216+		uint32_t osd2_blk1_cfg4;
3217+		uint32_t osd2_blk2_cfg4;
3218+		uint32_t osd2_addr;
3219+		uint32_t osd2_stride;
3220+		uint32_t osd2_height;
3221+		uint32_t osd2_width;
3222+		uint32_t osd_blend_din3_scope_h;
3223+		uint32_t osd_blend_din3_scope_v;
3224 		uint32_t osb_blend1_size;
3225
3226 		bool vd1_enabled;
3227diff -Naur a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
3228--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c	2022-12-19 17:13:12.677518989 +0800
3229+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c	2023-02-23 17:02:04.955751014 +0800
3230@@ -22,14 +22,11 @@
3231 #include <drm/drm_probe_helper.h>
3232 #include <drm/drm_print.h>
3233
3234-#include <linux/media-bus-format.h>
3235 #include <linux/videodev2.h>
3236
3237 #include "meson_drv.h"
3238 #include "meson_dw_hdmi.h"
3239 #include "meson_registers.h"
3240-#include "meson_vclk.h"
3241-#include "meson_venc.h"
3242
3243 #define DRIVER_NAME "meson-dw-hdmi"
3244 #define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
3245@@ -135,8 +132,6 @@
3246 };
3247
3248 struct meson_dw_hdmi {
3249-	struct drm_encoder encoder;
3250-	struct drm_bridge bridge;
3251 	struct dw_hdmi_plat_data dw_plat_data;
3252 	struct meson_drm *priv;
3253 	struct device *dev;
3254@@ -148,12 +143,8 @@
3255 	struct regulator *hdmi_supply;
3256 	u32 irq_stat;
3257 	struct dw_hdmi *hdmi;
3258-	unsigned long output_bus_fmt;
3259+	struct drm_bridge *bridge;
3260 };
3261-#define encoder_to_meson_dw_hdmi(x) \
3262-	container_of(x, struct meson_dw_hdmi, encoder)
3263-#define bridge_to_meson_dw_hdmi(x) \
3264-	container_of(x, struct meson_dw_hdmi, bridge)
3265
3266 static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
3267 					const char *compat)
3268@@ -295,14 +286,14 @@
3269
3270 /* Setup PHY bandwidth modes */
3271 static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
3272-				      const struct drm_display_mode *mode)
3273+				      const struct drm_display_mode *mode,
3274+				      bool mode_is_420)
3275 {
3276 	struct meson_drm *priv = dw_hdmi->priv;
3277 	unsigned int pixel_clock = mode->clock;
3278
3279 	/* For 420, pixel clock is half unlike venc clock */
3280-	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
3281-		pixel_clock /= 2;
3282+	if (mode_is_420) pixel_clock /= 2;
3283
3284 	if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
3285 	    dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
3286@@ -374,68 +365,25 @@
3287 	mdelay(2);
3288 }
3289
3290-static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
3291-			     const struct drm_display_mode *mode)
3292-{
3293-	struct meson_drm *priv = dw_hdmi->priv;
3294-	int vic = drm_match_cea_mode(mode);
3295-	unsigned int phy_freq;
3296-	unsigned int vclk_freq;
3297-	unsigned int venc_freq;
3298-	unsigned int hdmi_freq;
3299-
3300-	vclk_freq = mode->clock;
3301-
3302-	/* For 420, pixel clock is half unlike venc clock */
3303-	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
3304-		vclk_freq /= 2;
3305-
3306-	/* TMDS clock is pixel_clock * 10 */
3307-	phy_freq = vclk_freq * 10;
3308-
3309-	if (!vic) {
3310-		meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
3311-				 vclk_freq, vclk_freq, vclk_freq, false);
3312-		return;
3313-	}
3314-
3315-	/* 480i/576i needs global pixel doubling */
3316-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3317-		vclk_freq *= 2;
3318-
3319-	venc_freq = vclk_freq;
3320-	hdmi_freq = vclk_freq;
3321-
3322-	/* VENC double pixels for 1080i, 720p and YUV420 modes */
3323-	if (meson_venc_hdmi_venc_repeat(vic) ||
3324-	    dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
3325-		venc_freq *= 2;
3326-
3327-	vclk_freq = max(venc_freq, hdmi_freq);
3328-
3329-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3330-		venc_freq /= 2;
3331-
3332-	DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
3333-		phy_freq, vclk_freq, venc_freq, hdmi_freq,
3334-		priv->venc.hdmi_use_enci);
3335-
3336-	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
3337-			 venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
3338-}
3339-
3340 static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
3341 			    const struct drm_display_info *display,
3342 			    const struct drm_display_mode *mode)
3343 {
3344 	struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
3345+	bool is_hdmi2_sink = display->hdmi.scdc.supported;
3346 	struct meson_drm *priv = dw_hdmi->priv;
3347 	unsigned int wr_clk =
3348 		readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
3349+	bool mode_is_420 = false;
3350
3351 	DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name,
3352 			 mode->clock > 340000 ? 40 : 10);
3353
3354+	if (drm_mode_is_420_only(display, mode) ||
3355+	    (!is_hdmi2_sink &&
3356+	     drm_mode_is_420_also(display, mode)))
3357+		mode_is_420 = true;
3358+
3359 	/* Enable clocks */
3360 	regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
3361
3362@@ -457,8 +405,7 @@
3363 	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
3364
3365 	/* TMDS pattern setup */
3366-	if (mode->clock > 340000 &&
3367-	    dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
3368+	if (mode->clock > 340000 && !mode_is_420) {
3369 		dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
3370 				  0);
3371 		dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
3372@@ -476,7 +423,7 @@
3373 	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
3374
3375 	/* Setup PHY parameters */
3376-	meson_hdmi_phy_setup_mode(dw_hdmi, mode);
3377+	meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
3378
3379 	/* Setup PHY */
3380 	regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
3381@@ -622,214 +569,15 @@
3382 		dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
3383 				       hpd_connected);
3384
3385-		drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
3386+		drm_helper_hpd_irq_event(dw_hdmi->bridge->dev);
3387+		drm_bridge_hpd_notify(dw_hdmi->bridge,
3388+				      hpd_connected ? connector_status_connected
3389+						    : connector_status_disconnected);
3390 	}
3391
3392 	return IRQ_HANDLED;
3393 }
3394
3395-static enum drm_mode_status
3396-dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
3397-		   const struct drm_display_info *display_info,
3398-		   const struct drm_display_mode *mode)
3399-{
3400-	struct meson_dw_hdmi *dw_hdmi = data;
3401-	struct meson_drm *priv = dw_hdmi->priv;
3402-	bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
3403-	unsigned int phy_freq;
3404-	unsigned int vclk_freq;
3405-	unsigned int venc_freq;
3406-	unsigned int hdmi_freq;
3407-	int vic = drm_match_cea_mode(mode);
3408-	enum drm_mode_status status;
3409-
3410-	DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
3411-
3412-	/* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
3413-	if (display_info->max_tmds_clock &&
3414-	    mode->clock > display_info->max_tmds_clock &&
3415-	    !drm_mode_is_420_only(display_info, mode) &&
3416-	    !drm_mode_is_420_also(display_info, mode))
3417-		return MODE_BAD;
3418-
3419-	/* Check against non-VIC supported modes */
3420-	if (!vic) {
3421-		status = meson_venc_hdmi_supported_mode(mode);
3422-		if (status != MODE_OK)
3423-			return status;
3424-
3425-		return meson_vclk_dmt_supported_freq(priv, mode->clock);
3426-	/* Check against supported VIC modes */
3427-	} else if (!meson_venc_hdmi_supported_vic(vic))
3428-		return MODE_BAD;
3429-
3430-	vclk_freq = mode->clock;
3431-
3432-	/* For 420, pixel clock is half unlike venc clock */
3433-	if (drm_mode_is_420_only(display_info, mode) ||
3434-	    (!is_hdmi2_sink &&
3435-	     drm_mode_is_420_also(display_info, mode)))
3436-		vclk_freq /= 2;
3437-
3438-	/* TMDS clock is pixel_clock * 10 */
3439-	phy_freq = vclk_freq * 10;
3440-
3441-	/* 480i/576i needs global pixel doubling */
3442-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3443-		vclk_freq *= 2;
3444-
3445-	venc_freq = vclk_freq;
3446-	hdmi_freq = vclk_freq;
3447-
3448-	/* VENC double pixels for 1080i, 720p and YUV420 modes */
3449-	if (meson_venc_hdmi_venc_repeat(vic) ||
3450-	    drm_mode_is_420_only(display_info, mode) ||
3451-	    (!is_hdmi2_sink &&
3452-	     drm_mode_is_420_also(display_info, mode)))
3453-		venc_freq *= 2;
3454-
3455-	vclk_freq = max(venc_freq, hdmi_freq);
3456-
3457-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3458-		venc_freq /= 2;
3459-
3460-	dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
3461-		__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
3462-
3463-	return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
3464-}
3465-
3466-/* Encoder */
3467-
3468-static const u32 meson_dw_hdmi_out_bus_fmts[] = {
3469-	MEDIA_BUS_FMT_YUV8_1X24,
3470-	MEDIA_BUS_FMT_UYYVYY8_0_5X24,
3471-};
3472-
3473-static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
3474-{
3475-	drm_encoder_cleanup(encoder);
3476-}
3477-
3478-static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
3479-	.destroy        = meson_venc_hdmi_encoder_destroy,
3480-};
3481-
3482-static u32 *
3483-meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
3484-					struct drm_bridge_state *bridge_state,
3485-					struct drm_crtc_state *crtc_state,
3486-					struct drm_connector_state *conn_state,
3487-					u32 output_fmt,
3488-					unsigned int *num_input_fmts)
3489-{
3490-	u32 *input_fmts = NULL;
3491-	int i;
3492-
3493-	*num_input_fmts = 0;
3494-
3495-	for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
3496-		if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
3497-			*num_input_fmts = 1;
3498-			input_fmts = kcalloc(*num_input_fmts,
3499-					     sizeof(*input_fmts),
3500-					     GFP_KERNEL);
3501-			if (!input_fmts)
3502-				return NULL;
3503-
3504-			input_fmts[0] = output_fmt;
3505-
3506-			break;
3507-		}
3508-	}
3509-
3510-	return input_fmts;
3511-}
3512-
3513-static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
3514-					struct drm_bridge_state *bridge_state,
3515-					struct drm_crtc_state *crtc_state,
3516-					struct drm_connector_state *conn_state)
3517-{
3518-	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
3519-
3520-	dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
3521-
3522-	DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
3523-
3524-	return 0;
3525-}
3526-
3527-static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
3528-{
3529-	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
3530-	struct meson_drm *priv = dw_hdmi->priv;
3531-
3532-	DRM_DEBUG_DRIVER("\n");
3533-
3534-	writel_bits_relaxed(0x3, 0,
3535-			    priv->io_base + _REG(VPU_HDMI_SETTING));
3536-
3537-	writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
3538-	writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
3539-}
3540-
3541-static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
3542-{
3543-	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
3544-	struct meson_drm *priv = dw_hdmi->priv;
3545-
3546-	DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
3547-
3548-	if (priv->venc.hdmi_use_enci)
3549-		writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
3550-	else
3551-		writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
3552-}
3553-
3554-static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
3555-				   const struct drm_display_mode *mode,
3556-				   const struct drm_display_mode *adjusted_mode)
3557-{
3558-	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
3559-	struct meson_drm *priv = dw_hdmi->priv;
3560-	int vic = drm_match_cea_mode(mode);
3561-	unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
3562-	bool yuv420_mode = false;
3563-
3564-	DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
3565-
3566-	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
3567-		ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
3568-		yuv420_mode = true;
3569-	}
3570-
3571-	/* VENC + VENC-DVI Mode setup */
3572-	meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
3573-
3574-	/* VCLK Set clock */
3575-	dw_hdmi_set_vclk(dw_hdmi, mode);
3576-
3577-	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
3578-		/* Setup YUV420 to HDMI-TX, no 10bit diphering */
3579-		writel_relaxed(2 | (2 << 2),
3580-			       priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
3581-	else
3582-		/* Setup YUV444 to HDMI-TX, no 10bit diphering */
3583-		writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
3584-}
3585-
3586-static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
3587-	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
3588-	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
3589-	.atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
3590-	.atomic_reset = drm_atomic_helper_bridge_reset,
3591-	.atomic_check = meson_venc_hdmi_encoder_atomic_check,
3592-	.enable	= meson_venc_hdmi_encoder_enable,
3593-	.disable = meson_venc_hdmi_encoder_disable,
3594-	.mode_set = meson_venc_hdmi_encoder_mode_set,
3595-};
3596-
3597 /* DW HDMI Regmap */
3598
3599 static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
3600@@ -876,28 +624,6 @@
3601 	.dwc_write = dw_hdmi_g12a_dwc_write,
3602 };
3603
3604-static bool meson_hdmi_connector_is_available(struct device *dev)
3605-{
3606-	struct device_node *ep, *remote;
3607-
3608-	/* HDMI Connector is on the second port, first endpoint */
3609-	ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0);
3610-	if (!ep)
3611-		return false;
3612-
3613-	/* If the endpoint node exists, consider it enabled */
3614-	remote = of_graph_get_remote_port(ep);
3615-	if (remote) {
3616-		of_node_put(ep);
3617-		return true;
3618-	}
3619-
3620-	of_node_put(ep);
3621-	of_node_put(remote);
3622-
3623-	return false;
3624-}
3625-
3626 static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
3627 {
3628 	struct meson_drm *priv = meson_dw_hdmi->priv;
3629@@ -976,19 +702,11 @@
3630 	struct drm_device *drm = data;
3631 	struct meson_drm *priv = drm->dev_private;
3632 	struct dw_hdmi_plat_data *dw_plat_data;
3633-	struct drm_bridge *next_bridge;
3634-	struct drm_encoder *encoder;
3635-	struct resource *res;
3636 	int irq;
3637 	int ret;
3638
3639 	DRM_DEBUG_DRIVER("\n");
3640
3641-	if (!meson_hdmi_connector_is_available(dev)) {
3642-		dev_info(drm->dev, "HDMI Output connector not available\n");
3643-		return -ENODEV;
3644-	}
3645-
3646 	match = of_device_get_match_data(&pdev->dev);
3647 	if (!match) {
3648 		dev_err(&pdev->dev, "failed to get match data\n");
3649@@ -1004,7 +722,6 @@
3650 	meson_dw_hdmi->dev = dev;
3651 	meson_dw_hdmi->data = match;
3652 	dw_plat_data = &meson_dw_hdmi->dw_plat_data;
3653-	encoder = &meson_dw_hdmi->encoder;
3654
3655 	meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
3656 	if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
3657@@ -1042,8 +759,7 @@
3658 		return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
3659 	}
3660
3661-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3662-	meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
3663+	meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
3664 	if (IS_ERR(meson_dw_hdmi->hdmitx))
3665 		return PTR_ERR(meson_dw_hdmi->hdmitx);
3666
3667@@ -1076,33 +792,18 @@
3668 		return ret;
3669 	}
3670
3671-	/* Encoder */
3672-
3673-	ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
3674-			       DRM_MODE_ENCODER_TMDS, "meson_hdmi");
3675-	if (ret) {
3676-		dev_err(priv->dev, "Failed to init HDMI encoder\n");
3677-		return ret;
3678-	}
3679-
3680-	meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
3681-	drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
3682-
3683-	encoder->possible_crtcs = BIT(0);
3684-
3685 	meson_dw_hdmi_init(meson_dw_hdmi);
3686
3687-	DRM_DEBUG_DRIVER("encoder initialized\n");
3688-
3689 	/* Bridge / Connector */
3690
3691 	dw_plat_data->priv_data = meson_dw_hdmi;
3692-	dw_plat_data->mode_valid = dw_hdmi_mode_valid;
3693 	dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
3694 	dw_plat_data->phy_name = "meson_dw_hdmi_phy";
3695 	dw_plat_data->phy_data = meson_dw_hdmi;
3696 	dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
3697 	dw_plat_data->ycbcr_420_allowed = true;
3698+	dw_plat_data->disable_cec = true;
3699+	dw_plat_data->output_port = 1;
3700
3701 	if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
3702 	    dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
3703@@ -1111,15 +812,15 @@
3704
3705 	platform_set_drvdata(pdev, meson_dw_hdmi);
3706
3707-	meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
3708-					    &meson_dw_hdmi->dw_plat_data);
3709+	meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data);
3710 	if (IS_ERR(meson_dw_hdmi->hdmi))
3711 		return PTR_ERR(meson_dw_hdmi->hdmi);
3712
3713-	next_bridge = of_drm_find_bridge(pdev->dev.of_node);
3714-	if (next_bridge)
3715-		drm_bridge_attach(encoder, next_bridge,
3716-				  &meson_dw_hdmi->bridge, 0);
3717+	meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node);
3718+
3719+#ifdef CONFIG_DRIVERS_HDF_AUDIO
3720+	dw_hdmi_audio_enable(meson_dw_hdmi->hdmi);
3721+#endif
3722
3723 	DRM_DEBUG_DRIVER("HDMI controller initialized\n");
3724
3725diff -Naur a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
3726--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c	1970-01-01 08:00:00.000000000 +0800
3727+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c	2023-02-23 17:02:04.955751014 +0800
3728@@ -0,0 +1,364 @@
3729+// SPDX-License-Identifier: GPL-2.0-or-later
3730+/*
3731+ * Copyright (C) 2021 BayLibre, SAS
3732+ * Author: Neil Armstrong <narmstrong@baylibre.com>
3733+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
3734+ */
3735+
3736+#include <linux/clk.h>
3737+#include <linux/component.h>
3738+#include <linux/kernel.h>
3739+#include <linux/module.h>
3740+#include <linux/of_device.h>
3741+#include <linux/of_graph.h>
3742+#include <linux/reset.h>
3743+#include <linux/phy/phy.h>
3744+#include <linux/bitfield.h>
3745+
3746+#include <video/mipi_display.h>
3747+
3748+#include <drm/bridge/dw_mipi_dsi.h>
3749+#include <drm/drm_mipi_dsi.h>
3750+
3751+#include <drm/drm_atomic_helper.h>
3752+#include <drm/drm_device.h>
3753+#include <drm/drm_probe_helper.h>
3754+#include <drm/drm_print.h>
3755+
3756+#include "meson_drv.h"
3757+#include "meson_dw_mipi_dsi.h"
3758+#include "meson_registers.h"
3759+#include "meson_venc.h"
3760+
3761+#define DRIVER_NAME "meson-dw-mipi-dsi"
3762+#define DRIVER_DESC "Amlogic Meson MIPI-DSI DRM driver"
3763+
3764+struct meson_dw_mipi_dsi {
3765+	struct meson_drm *priv;
3766+	struct device *dev;
3767+	void __iomem *base;
3768+	struct phy *phy;
3769+	union phy_configure_opts phy_opts;
3770+	struct dw_mipi_dsi *dmd;
3771+	struct dw_mipi_dsi_plat_data pdata;
3772+	struct mipi_dsi_device *dsi_device;
3773+	const struct drm_display_mode *mode;
3774+	struct clk *px_clk;
3775+};
3776+
3777+#define encoder_to_meson_dw_mipi_dsi(x) \
3778+	container_of(x, struct meson_dw_mipi_dsi, encoder)
3779+
3780+static void meson_dw_mipi_dsi_hw_init(struct meson_dw_mipi_dsi *mipi_dsi)
3781+{
3782+	/* Software reset */
3783+	writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
3784+			    MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
3785+			    MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
3786+			    MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
3787+			    mipi_dsi->base + MIPI_DSI_TOP_SW_RESET);
3788+	writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
3789+			    MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
3790+			    0, mipi_dsi->base + MIPI_DSI_TOP_SW_RESET);
3791+
3792+	/* Enable clocks */
3793+	writel_bits_relaxed(MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN,
3794+			    MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN,
3795+			    mipi_dsi->base + MIPI_DSI_TOP_CLK_CNTL);
3796+
3797+	/* Take memory out of power down */
3798+	writel_relaxed(0, mipi_dsi->base + MIPI_DSI_TOP_MEM_PD);
3799+}
3800+
3801+static int dw_mipi_dsi_phy_init(void *priv_data)
3802+{
3803+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3804+	unsigned int dpi_data_format, venc_data_width;
3805+	int ret;
3806+
3807+	ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate);
3808+	if (ret) {
3809+		pr_err("Failed to set DSI PLL rate %lu\n",
3810+		       mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate);
3811+
3812+		return ret;
3813+	}
3814+
3815+	switch (mipi_dsi->dsi_device->format) {
3816+	case MIPI_DSI_FMT_RGB888:
3817+		dpi_data_format = DPI_COLOR_24BIT;
3818+		venc_data_width = VENC_IN_COLOR_24B;
3819+		break;
3820+	case MIPI_DSI_FMT_RGB666:
3821+		dpi_data_format = DPI_COLOR_18BIT_CFG_2;
3822+		venc_data_width = VENC_IN_COLOR_18B;
3823+		break;
3824+	case MIPI_DSI_FMT_RGB666_PACKED:
3825+	case MIPI_DSI_FMT_RGB565:
3826+		return -EINVAL;
3827+	};
3828+
3829+	/* Configure color format for DPI register */
3830+	writel_relaxed(FIELD_PREP(MIPI_DSI_TOP_DPI_COLOR_MODE, dpi_data_format) |
3831+		       FIELD_PREP(MIPI_DSI_TOP_IN_COLOR_MODE, venc_data_width) |
3832+		       FIELD_PREP(MIPI_DSI_TOP_COMP2_SEL, 2) |
3833+		       FIELD_PREP(MIPI_DSI_TOP_COMP1_SEL, 1) |
3834+		       FIELD_PREP(MIPI_DSI_TOP_COMP0_SEL, 0) |
3835+		       (mipi_dsi->mode->flags & DRM_MODE_FLAG_NHSYNC ?
3836+				0 : MIPI_DSI_TOP_HSYNC_INVERT) |
3837+		       (mipi_dsi->mode->flags & DRM_MODE_FLAG_NVSYNC ?
3838+				0 : MIPI_DSI_TOP_VSYNC_INVERT),
3839+			mipi_dsi->base + MIPI_DSI_TOP_CNTL);
3840+
3841+	return phy_configure(mipi_dsi->phy, &mipi_dsi->phy_opts);
3842+}
3843+
3844+static void dw_mipi_dsi_phy_power_on(void *priv_data)
3845+{
3846+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3847+
3848+	if (phy_power_on(mipi_dsi->phy))
3849+		dev_warn(mipi_dsi->dev, "Failed to power on PHY\n");
3850+}
3851+
3852+static void dw_mipi_dsi_phy_power_off(void *priv_data)
3853+{
3854+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3855+
3856+	if (phy_power_off(mipi_dsi->phy))
3857+		dev_warn(mipi_dsi->dev, "Failed to power off PHY\n");
3858+}
3859+
3860+static int
3861+dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
3862+			  unsigned long mode_flags, u32 lanes, u32 format,
3863+			  unsigned int *lane_mbps)
3864+{
3865+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3866+	int bpp;
3867+
3868+	mipi_dsi->mode = mode;
3869+
3870+	bpp = mipi_dsi_pixel_format_to_bpp(mipi_dsi->dsi_device->format);
3871+
3872+	phy_mipi_dphy_get_default_config(mode->clock * 1000,
3873+					 bpp, mipi_dsi->dsi_device->lanes,
3874+					 &mipi_dsi->phy_opts.mipi_dphy);
3875+
3876+	// UNIONMAN add: Round to 800MHz(needed by waveshare panel) (FIXME)
3877+	if (mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate > 600*1000000) {
3878+		mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate = max(800 * 1000000,
3879+				mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate);
3880+	}
3881+
3882+	*lane_mbps = mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate / 1000000;
3883+
3884+	return 0;
3885+}
3886+
3887+static int
3888+dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
3889+			   struct dw_mipi_dsi_dphy_timing *timing)
3890+{
3891+	/* TOFIX handle other cases */
3892+
3893+	timing->clk_lp2hs = 37;
3894+	timing->clk_hs2lp = 135;
3895+	timing->data_lp2hs = 50;
3896+	timing->data_hs2lp = 3;
3897+
3898+	return 0;
3899+}
3900+
3901+static int
3902+dw_mipi_dsi_get_esc_clk_rate(void *priv_data, unsigned int *esc_clk_rate)
3903+{
3904+	*esc_clk_rate = 4; /* Mhz */
3905+
3906+	return 0;
3907+}
3908+
3909+static const struct dw_mipi_dsi_phy_ops meson_dw_mipi_dsi_phy_ops = {
3910+	.init = dw_mipi_dsi_phy_init,
3911+	.power_on = dw_mipi_dsi_phy_power_on,
3912+	.power_off = dw_mipi_dsi_phy_power_off,
3913+	.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
3914+	.get_timing = dw_mipi_dsi_phy_get_timing,
3915+	.get_esc_clk_rate = dw_mipi_dsi_get_esc_clk_rate,
3916+};
3917+
3918+static int meson_dw_mipi_dsi_bind(struct device *dev, struct device *master, void *data)
3919+{
3920+	struct meson_dw_mipi_dsi *mipi_dsi = dev_get_drvdata(dev);
3921+	struct drm_device *drm = data;
3922+	struct meson_drm *priv = drm->dev_private;
3923+
3924+	/* Check before if we are supposed to have a sub-device... */
3925+	if (!mipi_dsi->dsi_device) {
3926+		dw_mipi_dsi_remove(mipi_dsi->dmd);
3927+		return -EPROBE_DEFER;
3928+	}
3929+
3930+	mipi_dsi->priv = priv;
3931+
3932+	meson_dw_mipi_dsi_hw_init(mipi_dsi);
3933+
3934+	return 0;
3935+}
3936+
3937+static const struct component_ops meson_dw_mipi_dsi_ops = {
3938+	.bind	= meson_dw_mipi_dsi_bind,
3939+};
3940+
3941+static int meson_dw_mipi_dsi_host_attach(void *priv_data,
3942+					 struct mipi_dsi_device *device)
3943+{
3944+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3945+
3946+	mipi_dsi->dsi_device = device;
3947+
3948+	switch (device->format) {
3949+	case MIPI_DSI_FMT_RGB888:
3950+		break;
3951+	case MIPI_DSI_FMT_RGB666:
3952+		break;
3953+	case MIPI_DSI_FMT_RGB666_PACKED:
3954+	case MIPI_DSI_FMT_RGB565:
3955+		dev_err(mipi_dsi->dev, "invalid pixel format %d\n", device->format);
3956+		return -EINVAL;
3957+	};
3958+
3959+	return phy_init(mipi_dsi->phy);
3960+}
3961+
3962+static int meson_dw_mipi_dsi_host_detach(void *priv_data,
3963+					 struct mipi_dsi_device *device)
3964+{
3965+	struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
3966+
3967+	if (device == mipi_dsi->dsi_device)
3968+		mipi_dsi->dsi_device = NULL;
3969+	else
3970+		return -EINVAL;
3971+
3972+	return phy_exit(mipi_dsi->phy);
3973+}
3974+
3975+static const struct dw_mipi_dsi_host_ops meson_dw_mipi_dsi_host_ops = {
3976+	.attach = meson_dw_mipi_dsi_host_attach,
3977+	.detach = meson_dw_mipi_dsi_host_detach,
3978+};
3979+
3980+static int meson_dw_mipi_dsi_probe(struct platform_device *pdev)
3981+{
3982+	struct meson_dw_mipi_dsi *mipi_dsi;
3983+	struct reset_control *top_rst;
3984+	struct resource *res;
3985+	int ret;
3986+
3987+	mipi_dsi = devm_kzalloc(&pdev->dev, sizeof(*mipi_dsi), GFP_KERNEL);
3988+	if (!mipi_dsi)
3989+		return -ENOMEM;
3990+
3991+	mipi_dsi->dev = &pdev->dev;
3992+
3993+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3994+	mipi_dsi->base = devm_ioremap_resource(&pdev->dev, res);
3995+	if (IS_ERR(mipi_dsi->base))
3996+		return PTR_ERR(mipi_dsi->base);
3997+
3998+	mipi_dsi->phy = devm_phy_get(&pdev->dev, "dphy");
3999+	if (IS_ERR(mipi_dsi->phy)) {
4000+		ret = PTR_ERR(mipi_dsi->phy);
4001+		dev_err(&pdev->dev, "failed to get mipi dphy: %d\n", ret);
4002+		return ret;
4003+	}
4004+
4005+	mipi_dsi->px_clk = devm_clk_get(&pdev->dev, "px_clk");
4006+	if (IS_ERR(mipi_dsi->px_clk)) {
4007+		dev_err(&pdev->dev, "Unable to get PLL clk\n");
4008+		return PTR_ERR(mipi_dsi->px_clk);
4009+	}
4010+
4011+	/*
4012+	 * We use a TOP reset signal because the APB reset signal
4013+	 * is handled by the TOP control registers.
4014+	 */
4015+	top_rst = devm_reset_control_get_exclusive(&pdev->dev, "top");
4016+	if (IS_ERR(top_rst)) {
4017+		ret = PTR_ERR(top_rst);
4018+
4019+		if (ret != -EPROBE_DEFER)
4020+			dev_err(&pdev->dev, "Unable to get reset control: %d\n", ret);
4021+
4022+		return ret;
4023+	}
4024+
4025+	ret = clk_prepare_enable(mipi_dsi->px_clk);
4026+	if (ret) {
4027+		dev_err(&pdev->dev, "Unable to prepare/enable PX clock\n");
4028+		return ret;
4029+	}
4030+
4031+	reset_control_assert(top_rst);
4032+	usleep_range(10, 20);
4033+	reset_control_deassert(top_rst);
4034+
4035+	/* MIPI DSI Controller */
4036+
4037+	mipi_dsi->pdata.base = mipi_dsi->base;
4038+	mipi_dsi->pdata.max_data_lanes = 4;
4039+	mipi_dsi->pdata.phy_ops = &meson_dw_mipi_dsi_phy_ops;
4040+	mipi_dsi->pdata.host_ops = &meson_dw_mipi_dsi_host_ops;
4041+	mipi_dsi->pdata.priv_data = mipi_dsi;
4042+	platform_set_drvdata(pdev, mipi_dsi);
4043+
4044+	mipi_dsi->dmd = dw_mipi_dsi_probe(pdev, &mipi_dsi->pdata);
4045+	if (IS_ERR(mipi_dsi->dmd)) {
4046+		ret = PTR_ERR(mipi_dsi->dmd);
4047+		if (ret != -EPROBE_DEFER)
4048+			dev_err(&pdev->dev,
4049+				"Failed to probe dw_mipi_dsi: %d\n", ret);
4050+		goto err_clkdisable;
4051+	}
4052+
4053+	return component_add(mipi_dsi->dev, &meson_dw_mipi_dsi_ops);
4054+
4055+err_clkdisable:
4056+	clk_disable_unprepare(mipi_dsi->px_clk);
4057+
4058+	return ret;
4059+}
4060+
4061+static int meson_dw_mipi_dsi_remove(struct platform_device *pdev)
4062+{
4063+	struct meson_dw_mipi_dsi *mipi_dsi = dev_get_drvdata(&pdev->dev);
4064+
4065+	dw_mipi_dsi_remove(mipi_dsi->dmd);
4066+
4067+	component_del(mipi_dsi->dev, &meson_dw_mipi_dsi_ops);
4068+
4069+	clk_disable_unprepare(mipi_dsi->px_clk);
4070+
4071+	return 0;
4072+}
4073+
4074+static const struct of_device_id meson_dw_mipi_dsi_of_table[] = {
4075+	{ .compatible = "amlogic,meson-g12a-dw-mipi-dsi", },
4076+	{ }
4077+};
4078+MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table);
4079+
4080+static struct platform_driver meson_dw_mipi_dsi_platform_driver = {
4081+	.probe		= meson_dw_mipi_dsi_probe,
4082+	.remove		= meson_dw_mipi_dsi_remove,
4083+	.driver		= {
4084+		.name		= DRIVER_NAME,
4085+		.of_match_table	= meson_dw_mipi_dsi_of_table,
4086+	},
4087+};
4088+module_platform_driver(meson_dw_mipi_dsi_platform_driver);
4089+
4090+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
4091+MODULE_DESCRIPTION(DRIVER_DESC);
4092+MODULE_LICENSE("GPL");
4093diff -Naur a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.h b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.h
4094--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.h	1970-01-01 08:00:00.000000000 +0800
4095+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.h	2023-02-23 17:02:04.955751014 +0800
4096@@ -0,0 +1,160 @@
4097+/* SPDX-License-Identifier: GPL-2.0-or-later */
4098+/*
4099+ * Copyright (C) 2020 BayLibre, SAS
4100+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4101+ * Copyright (C) 2018 Amlogic, Inc. All rights reserved.
4102+ */
4103+
4104+#ifndef __MESON_DW_MIPI_DSI_H
4105+#define __MESON_DW_MIPI_DSI_H
4106+
4107+/* Top-level registers */
4108+/* [31: 4]    Reserved.     Default 0.
4109+ *     [3] RW timing_rst_n: Default 1.
4110+ *		1=Assert SW reset of timing feature.   0=Release reset.
4111+ *     [2] RW dpi_rst_n: Default 1.
4112+ *		1=Assert SW reset on mipi_dsi_host_dpi block.   0=Release reset.
4113+ *     [1] RW intr_rst_n: Default 1.
4114+ *		1=Assert SW reset on mipi_dsi_host_intr block.  0=Release reset.
4115+ *     [0] RW dwc_rst_n:  Default 1.
4116+ *		1=Assert SW reset on IP core.   0=Release reset.
4117+ */
4118+#define MIPI_DSI_TOP_SW_RESET                      0x3c0
4119+
4120+#define MIPI_DSI_TOP_SW_RESET_DWC	BIT(0)
4121+#define MIPI_DSI_TOP_SW_RESET_INTR	BIT(1)
4122+#define MIPI_DSI_TOP_SW_RESET_DPI	BIT(2)
4123+#define MIPI_DSI_TOP_SW_RESET_TIMING	BIT(3)
4124+
4125+/* [31: 5] Reserved.   Default 0.
4126+ *     [4] RW manual_edpihalt: Default 0.
4127+ *		1=Manual suspend VencL; 0=do not suspend VencL.
4128+ *     [3] RW auto_edpihalt_en: Default 0.
4129+ *		1=Enable IP's edpihalt signal to suspend VencL;
4130+ *		0=IP's edpihalt signal does not affect VencL.
4131+ *     [2] RW clock_freerun: Apply to auto-clock gate only. Default 0.
4132+ *		0=Default, use auto-clock gating to save power;
4133+ *		1=use free-run clock, disable auto-clock gating, for debug mode.
4134+ *     [1] RW enable_pixclk: A manual clock gate option, due to DWC IP does not
4135+ *		have auto-clock gating. 1=Enable pixclk.      Default 0.
4136+ *     [0] RW enable_sysclk: A manual clock gate option, due to DWC IP does not
4137+ *		have auto-clock gating. 1=Enable sysclk.      Default 0.
4138+ */
4139+#define MIPI_DSI_TOP_CLK_CNTL                      0x3c4
4140+
4141+#define MIPI_DSI_TOP_CLK_SYSCLK_EN	BIT(0)
4142+#define MIPI_DSI_TOP_CLK_PIXCLK_EN	BIT(1)
4143+
4144+/* [31:24]    Reserved. Default 0.
4145+ * [23:20] RW dpi_color_mode: Define DPI pixel format. Default 0.
4146+ *		0=16-bit RGB565 config 1;
4147+ *		1=16-bit RGB565 config 2;
4148+ *		2=16-bit RGB565 config 3;
4149+ *		3=18-bit RGB666 config 1;
4150+ *		4=18-bit RGB666 config 2;
4151+ *		5=24-bit RGB888;
4152+ *		6=20-bit YCbCr 4:2:2;
4153+ *		7=24-bit YCbCr 4:2:2;
4154+ *		8=16-bit YCbCr 4:2:2;
4155+ *		9=30-bit RGB;
4156+ *		10=36-bit RGB;
4157+ *		11=12-bit YCbCr 4:2:0.
4158+ *    [19] Reserved. Default 0.
4159+ * [18:16] RW in_color_mode:  Define VENC data width. Default 0.
4160+ *		0=30-bit pixel;
4161+ *		1=24-bit pixel;
4162+ *		2=18-bit pixel, RGB666;
4163+ *		3=16-bit pixel, RGB565.
4164+ * [15:14] RW chroma_subsample: Define method of chroma subsampling. Default 0.
4165+ *		Applicable to YUV422 or YUV420 only.
4166+ *		0=Use even pixel's chroma;
4167+ *		1=Use odd pixel's chroma;
4168+ *		2=Use averaged value between even and odd pair.
4169+ * [13:12] RW comp2_sel:  Select which component to be Cr or B: Default 2.
4170+ *		0=comp0; 1=comp1; 2=comp2.
4171+ * [11:10] RW comp1_sel:  Select which component to be Cb or G: Default 1.
4172+ *		0=comp0; 1=comp1; 2=comp2.
4173+ *  [9: 8] RW comp0_sel:  Select which component to be Y  or R: Default 0.
4174+ *		0=comp0; 1=comp1; 2=comp2.
4175+ *     [7]    Reserved. Default 0.
4176+ *     [6] RW de_pol:  Default 0.
4177+ *		If DE input is active low, set to 1 to invert to active high.
4178+ *     [5] RW hsync_pol: Default 0.
4179+ *		If HS input is active low, set to 1 to invert to active high.
4180+ *     [4] RW vsync_pol: Default 0.
4181+ *		If VS input is active low, set to 1 to invert to active high.
4182+ *     [3] RW dpicolorm: Signal to IP.   Default 0.
4183+ *     [2] RW dpishutdn: Signal to IP.   Default 0.
4184+ *     [1]    Reserved.  Default 0.
4185+ *     [0]    Reserved.  Default 0.
4186+ */
4187+#define MIPI_DSI_TOP_CNTL                          0x3c8
4188+
4189+/* VENC data width */
4190+#define VENC_IN_COLOR_30B   0x0
4191+#define VENC_IN_COLOR_24B   0x1
4192+#define VENC_IN_COLOR_18B   0x2
4193+#define VENC_IN_COLOR_16B   0x3
4194+
4195+/* DPI pixel format */
4196+#define DPI_COLOR_16BIT_CFG_1		0
4197+#define DPI_COLOR_16BIT_CFG_2		1
4198+#define DPI_COLOR_16BIT_CFG_3		2
4199+#define DPI_COLOR_18BIT_CFG_1		3
4200+#define DPI_COLOR_18BIT_CFG_2		4
4201+#define DPI_COLOR_24BIT			5
4202+#define DPI_COLOR_20BIT_YCBCR_422	6
4203+#define DPI_COLOR_24BIT_YCBCR_422	7
4204+#define DPI_COLOR_16BIT_YCBCR_422	8
4205+#define DPI_COLOR_30BIT			9
4206+#define DPI_COLOR_36BIT			10
4207+#define DPI_COLOR_12BIT_YCBCR_420	11
4208+
4209+#define MIPI_DSI_TOP_DPI_COLOR_MODE	GENMASK(23, 20)
4210+#define MIPI_DSI_TOP_IN_COLOR_MODE	GENMASK(18, 16)
4211+#define MIPI_DSI_TOP_CHROMA_SUBSAMPLE	GENMASK(15, 14)
4212+#define MIPI_DSI_TOP_COMP2_SEL		GENMASK(13, 12)
4213+#define MIPI_DSI_TOP_COMP1_SEL		GENMASK(11, 10)
4214+#define MIPI_DSI_TOP_COMP0_SEL		GENMASK(9, 8)
4215+#define MIPI_DSI_TOP_DE_INVERT		BIT(6)
4216+#define MIPI_DSI_TOP_HSYNC_INVERT	BIT(5)
4217+#define MIPI_DSI_TOP_VSYNC_INVERT	BIT(4)
4218+#define MIPI_DSI_TOP_DPICOLORM		BIT(3)
4219+#define MIPI_DSI_TOP_DPISHUTDN		BIT(2)
4220+
4221+#define MIPI_DSI_TOP_SUSPEND_CNTL                  0x3cc
4222+#define MIPI_DSI_TOP_SUSPEND_LINE                  0x3d0
4223+#define MIPI_DSI_TOP_SUSPEND_PIX                   0x3d4
4224+#define MIPI_DSI_TOP_MEAS_CNTL                     0x3d8
4225+/* [0] R  stat_edpihalt:  edpihalt signal from IP.    Default 0. */
4226+#define MIPI_DSI_TOP_STAT                          0x3dc
4227+#define MIPI_DSI_TOP_MEAS_STAT_TE0                 0x3e0
4228+#define MIPI_DSI_TOP_MEAS_STAT_TE1                 0x3e4
4229+#define MIPI_DSI_TOP_MEAS_STAT_VS0                 0x3e8
4230+#define MIPI_DSI_TOP_MEAS_STAT_VS1                 0x3ec
4231+/* [31:16] RW intr_stat/clr. Default 0.
4232+ *		For each bit, read as this interrupt level status,
4233+ *		write 1 to clear.
4234+ * [31:22] Reserved
4235+ * [   21] stat/clr of eof interrupt
4236+ * [   21] vde_fall interrupt
4237+ * [   19] stat/clr of de_rise interrupt
4238+ * [   18] stat/clr of vs_fall interrupt
4239+ * [   17] stat/clr of vs_rise interrupt
4240+ * [   16] stat/clr of dwc_edpite interrupt
4241+ * [15: 0] RW intr_enable. Default 0.
4242+ *		For each bit, 1=enable this interrupt, 0=disable.
4243+ *	[15: 6] Reserved
4244+ *	[    5] eof interrupt
4245+ *	[    4] de_fall interrupt
4246+ *	[    3] de_rise interrupt
4247+ *	[    2] vs_fall interrupt
4248+ *	[    1] vs_rise interrupt
4249+ *	[    0] dwc_edpite interrupt
4250+ */
4251+#define MIPI_DSI_TOP_INTR_CNTL_STAT                0x3f0
4252+// 31: 2    Reserved.   Default 0.
4253+//  1: 0 RW mem_pd.     Default 3.
4254+#define MIPI_DSI_TOP_MEM_PD                        0x3f4
4255+
4256+#endif /* __MESON_DW_MIPI_DSI_H */
4257diff -Naur a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
4258--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c	1970-01-01 08:00:00.000000000 +0800
4259+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c	2023-02-23 17:02:04.955751014 +0800
4260@@ -0,0 +1,284 @@
4261+// SPDX-License-Identifier: GPL-2.0-or-later
4262+/*
4263+ * Copyright (C) 2016 BayLibre, SAS
4264+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4265+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
4266+ * Copyright (C) 2014 Endless Mobile
4267+ *
4268+ * Written by:
4269+ *     Jasper St. Pierre <jstpierre@mecheye.net>
4270+ */
4271+
4272+#include <linux/export.h>
4273+#include <linux/of_graph.h>
4274+
4275+#include <drm/drm_atomic_helper.h>
4276+#include <drm/drm_bridge.h>
4277+#include <drm/drm_bridge_connector.h>
4278+#include <drm/drm_device.h>
4279+#include <drm/drm_edid.h>
4280+#include <drm/drm_probe_helper.h>
4281+#include <drm/drm_simple_kms_helper.h>
4282+
4283+#include "meson_registers.h"
4284+#include "meson_vclk.h"
4285+#include "meson_encoder_cvbs.h"
4286+
4287+/* HHI VDAC Registers */
4288+#define HHI_VDAC_CNTL0		0x2F4 /* 0xbd offset in data sheet */
4289+#define HHI_VDAC_CNTL0_G12A	0x2EC /* 0xbd offset in data sheet */
4290+#define HHI_VDAC_CNTL1		0x2F8 /* 0xbe offset in data sheet */
4291+#define HHI_VDAC_CNTL1_G12A	0x2F0 /* 0xbe offset in data sheet */
4292+
4293+struct meson_encoder_cvbs {
4294+	struct drm_encoder	encoder;
4295+	struct drm_bridge	bridge;
4296+	struct drm_bridge	*next_bridge;
4297+	struct meson_drm	*priv;
4298+};
4299+
4300+#define bridge_to_meson_encoder_cvbs(x) \
4301+	container_of(x, struct meson_encoder_cvbs, bridge)
4302+
4303+/* Supported Modes */
4304+
4305+struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
4306+	{ /* PAL */
4307+		.enci = &meson_cvbs_enci_pal,
4308+		.mode = {
4309+			DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
4310+				 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
4311+				 DRM_MODE_FLAG_INTERLACE),
4312+			.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
4313+		},
4314+	},
4315+	{ /* NTSC */
4316+		.enci = &meson_cvbs_enci_ntsc,
4317+		.mode = {
4318+			DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
4319+				720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
4320+				DRM_MODE_FLAG_INTERLACE),
4321+			.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
4322+		},
4323+	},
4324+};
4325+
4326+static const struct meson_cvbs_mode *
4327+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
4328+{
4329+	int i;
4330+
4331+	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
4332+		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
4333+
4334+		if (drm_mode_match(req_mode, &meson_mode->mode,
4335+				   DRM_MODE_MATCH_TIMINGS |
4336+				   DRM_MODE_MATCH_CLOCK |
4337+				   DRM_MODE_MATCH_FLAGS |
4338+				   DRM_MODE_MATCH_3D_FLAGS))
4339+			return meson_mode;
4340+	}
4341+
4342+	return NULL;
4343+}
4344+
4345+static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
4346+				     enum drm_bridge_attach_flags flags)
4347+{
4348+	struct meson_encoder_cvbs *meson_encoder_cvbs =
4349+					bridge_to_meson_encoder_cvbs(bridge);
4350+
4351+	return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
4352+				 &meson_encoder_cvbs->bridge, flags);
4353+}
4354+
4355+static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
4356+					struct drm_connector *connector)
4357+{
4358+	struct meson_encoder_cvbs *meson_encoder_cvbs =
4359+					bridge_to_meson_encoder_cvbs(bridge);
4360+	struct meson_drm *priv = meson_encoder_cvbs->priv;
4361+	struct drm_display_mode *mode;
4362+	int i;
4363+
4364+	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
4365+		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
4366+
4367+		mode = drm_mode_duplicate(priv->drm, &meson_mode->mode);
4368+		if (!mode) {
4369+			dev_err(priv->dev, "Failed to create a new display mode\n");
4370+			return 0;
4371+		}
4372+
4373+		drm_mode_probed_add(connector, mode);
4374+	}
4375+
4376+	return i;
4377+}
4378+
4379+static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
4380+					const struct drm_display_info *display_info,
4381+					const struct drm_display_mode *mode)
4382+{
4383+	if (meson_cvbs_get_mode(mode))
4384+		return MODE_OK;
4385+
4386+	return MODE_BAD;
4387+}
4388+
4389+static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge,
4390+					struct drm_bridge_state *bridge_state,
4391+					struct drm_crtc_state *crtc_state,
4392+					struct drm_connector_state *conn_state)
4393+{
4394+	if (meson_cvbs_get_mode(&crtc_state->mode))
4395+		return 0;
4396+
4397+	return -EINVAL;
4398+}
4399+
4400+static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
4401+					     struct drm_bridge_state *bridge_state)
4402+{
4403+	struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge);
4404+	struct drm_atomic_state *state = bridge_state->base.state;
4405+	struct meson_drm *priv = encoder_cvbs->priv;
4406+	const struct meson_cvbs_mode *meson_mode;
4407+	struct drm_connector_state *conn_state;
4408+	struct drm_crtc_state *crtc_state;
4409+	struct drm_connector *connector;
4410+
4411+	connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
4412+	if (WARN_ON(!connector))
4413+		return;
4414+
4415+	conn_state = drm_atomic_get_new_connector_state(state, connector);
4416+	if (WARN_ON(!conn_state))
4417+		return;
4418+
4419+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
4420+	if (WARN_ON(!crtc_state))
4421+		return;
4422+
4423+	meson_mode = meson_cvbs_get_mode(&crtc_state->adjusted_mode);
4424+	if (WARN_ON(!meson_mode))
4425+		return;
4426+
4427+	meson_venci_cvbs_mode_set(priv, meson_mode->enci);
4428+
4429+	/* Setup 27MHz vclk2 for ENCI and VDAC */
4430+	meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
4431+			 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
4432+			 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
4433+			 true);
4434+
4435+	/* VDAC0 source is not from ATV */
4436+	writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
4437+			    priv->io_base + _REG(VENC_VDAC_DACSEL0));
4438+
4439+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
4440+		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
4441+		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
4442+	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
4443+		 meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
4444+		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
4445+		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
4446+	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
4447+		regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
4448+		regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
4449+	}
4450+}
4451+
4452+static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge,
4453+					      struct drm_bridge_state *bridge_state)
4454+{
4455+	struct meson_encoder_cvbs *meson_encoder_cvbs =
4456+					bridge_to_meson_encoder_cvbs(bridge);
4457+	struct meson_drm *priv = meson_encoder_cvbs->priv;
4458+
4459+	/* Disable CVBS VDAC */
4460+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
4461+		regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
4462+		regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
4463+	} else {
4464+		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
4465+		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
4466+	}
4467+}
4468+
4469+static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = {
4470+	.attach = meson_encoder_cvbs_attach,
4471+	.mode_valid = meson_encoder_cvbs_mode_valid,
4472+	.get_modes = meson_encoder_cvbs_get_modes,
4473+	.atomic_enable = meson_encoder_cvbs_atomic_enable,
4474+	.atomic_disable = meson_encoder_cvbs_atomic_disable,
4475+	.atomic_check = meson_encoder_cvbs_atomic_check,
4476+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
4477+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
4478+	.atomic_reset = drm_atomic_helper_bridge_reset,
4479+};
4480+
4481+int meson_encoder_cvbs_init(struct meson_drm *priv)
4482+{
4483+	struct drm_device *drm = priv->drm;
4484+	struct meson_encoder_cvbs *meson_encoder_cvbs;
4485+	struct drm_connector *connector;
4486+	struct device_node *remote;
4487+	int ret;
4488+
4489+	meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
4490+	if (!meson_encoder_cvbs)
4491+		return -ENOMEM;
4492+
4493+	/* CVBS Connector Bridge */
4494+	remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
4495+	if (!remote) {
4496+		dev_info(drm->dev, "CVBS Output connector not available\n");
4497+		return 0;
4498+	}
4499+
4500+	meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
4501+	if (!meson_encoder_cvbs->next_bridge) {
4502+		dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
4503+		return -EPROBE_DEFER;
4504+	}
4505+
4506+	/* CVBS Encoder Bridge */
4507+	meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
4508+	meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
4509+	meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
4510+	meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
4511+	meson_encoder_cvbs->bridge.interlace_allowed = true;
4512+
4513+	drm_bridge_add(&meson_encoder_cvbs->bridge);
4514+
4515+	meson_encoder_cvbs->priv = priv;
4516+
4517+	/* Encoder */
4518+	ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
4519+				      DRM_MODE_ENCODER_TVDAC);
4520+	if (ret) {
4521+		dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
4522+		return ret;
4523+	}
4524+
4525+	meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
4526+
4527+	/* Attach CVBS Encoder Bridge to Encoder */
4528+	ret = drm_bridge_attach(&meson_encoder_cvbs->encoder, &meson_encoder_cvbs->bridge, NULL,
4529+				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
4530+	if (ret) {
4531+		dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
4532+		return ret;
4533+	}
4534+
4535+	/* Initialize & attach Bridge Connector */
4536+	connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
4537+	if (IS_ERR(connector)) {
4538+		dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
4539+		return PTR_ERR(connector);
4540+	}
4541+	drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
4542+
4543+	return 0;
4544+}
4545diff -Naur a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
4546--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h	1970-01-01 08:00:00.000000000 +0800
4547+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h	2023-02-23 17:02:04.955751014 +0800
4548@@ -0,0 +1,29 @@
4549+/* SPDX-License-Identifier: GPL-2.0-or-later */
4550+/*
4551+ * Copyright (C) 2016 BayLibre, SAS
4552+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4553+ * Copyright (C) 2014 Endless Mobile
4554+ *
4555+ * Written by:
4556+ *     Jasper St. Pierre <jstpierre@mecheye.net>
4557+ */
4558+
4559+#ifndef __MESON_VENC_CVBS_H
4560+#define __MESON_VENC_CVBS_H
4561+
4562+#include "meson_drv.h"
4563+#include "meson_venc.h"
4564+
4565+struct meson_cvbs_mode {
4566+	struct meson_cvbs_enci_mode *enci;
4567+	struct drm_display_mode mode;
4568+};
4569+
4570+#define MESON_CVBS_MODES_COUNT	2
4571+
4572+/* Modes supported by the CVBS output */
4573+extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
4574+
4575+int meson_encoder_cvbs_init(struct meson_drm *priv);
4576+
4577+#endif /* __MESON_VENC_CVBS_H */
4578diff -Naur a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
4579--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c	1970-01-01 08:00:00.000000000 +0800
4580+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c	2023-02-23 17:02:04.959751069 +0800
4581@@ -0,0 +1,168 @@
4582+// SPDX-License-Identifier: GPL-2.0-or-later
4583+/*
4584+ * Copyright (C) 2016 BayLibre, SAS
4585+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4586+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
4587+ */
4588+
4589+#include <linux/kernel.h>
4590+#include <linux/module.h>
4591+#include <linux/of_device.h>
4592+#include <linux/of_graph.h>
4593+
4594+#include <drm/drm_atomic_helper.h>
4595+#include <drm/drm_simple_kms_helper.h>
4596+#include <drm/drm_bridge.h>
4597+#include <drm/drm_bridge_connector.h>
4598+#include <drm/drm_device.h>
4599+#include <drm/drm_probe_helper.h>
4600+
4601+#include "meson_drv.h"
4602+#include "meson_encoder_dsi.h"
4603+#include "meson_registers.h"
4604+#include "meson_venc.h"
4605+#include "meson_vclk.h"
4606+
4607+struct meson_encoder_dsi {
4608+	struct drm_encoder encoder;
4609+	struct drm_bridge bridge;
4610+	struct drm_bridge *next_bridge;
4611+	struct meson_drm *priv;
4612+};
4613+
4614+#define bridge_to_meson_encoder_dsi(x) \
4615+	container_of(x, struct meson_encoder_dsi, bridge)
4616+
4617+static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
4618+				    enum drm_bridge_attach_flags flags)
4619+{
4620+	struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
4621+
4622+	return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
4623+				 &encoder_dsi->bridge, flags);
4624+}
4625+
4626+static void meson_encoder_dsi_mode_set(struct drm_bridge *bridge,
4627+				       const struct drm_display_mode *mode,
4628+				       const struct drm_display_mode *adjusted_mode)
4629+{
4630+	struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
4631+	struct meson_drm *priv = encoder_dsi->priv;
4632+
4633+	meson_vclk_setup(priv, MESON_VCLK_TARGET_DSI, mode->clock, 0, 0, 0, false);
4634+
4635+	meson_venc_mipi_dsi_mode_set(priv, mode);
4636+	meson_encl_load_gamma(priv);
4637+
4638+	writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
4639+
4640+	writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, ENCL_VIDEO_MODE_ADV_VFIFO_EN,
4641+			    priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
4642+	writel_relaxed(0, priv->io_base + _REG(ENCL_TST_EN));
4643+}
4644+
4645+static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
4646+					    struct drm_bridge_state *bridge_state)
4647+{
4648+	struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
4649+	struct meson_drm *priv = encoder_dsi->priv;
4650+
4651+	// UNIONMAN add:  fix green/black color distortion issue with DSI
4652+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
4653+		writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT,
4654+				OSD1_HDR2_CTRL_REG_ONLY_MAT,
4655+				priv->io_base + _REG(OSD1_HDR2_CTRL));
4656+		dev_info(priv->dev, "set OSD1_HDR2_CTRL to fix green/black color distortion.\n");
4657+	}
4658+
4659+	writel_bits_relaxed(BIT(0), 0, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
4660+
4661+	writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN));
4662+}
4663+
4664+static void meson_encoder_dsi_atomic_disable(struct drm_bridge *bridge,
4665+					     struct drm_bridge_state *bridge_state)
4666+{
4667+	struct meson_encoder_dsi *meson_encoder_dsi =
4668+					bridge_to_meson_encoder_dsi(bridge);
4669+	struct meson_drm *priv = meson_encoder_dsi->priv;
4670+
4671+	writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
4672+
4673+	writel_bits_relaxed(BIT(0), BIT(0), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
4674+}
4675+
4676+static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = {
4677+	.attach	= meson_encoder_dsi_attach,
4678+	/*
4679+	 * TOFIX: remove when dw-mipi-dsi moves out of mode_set
4680+	 * We should get rid of mode_set, but until dw-mipi-dsi uses it
4681+	 * we need to setup the pixel clock before the following
4682+	 * bridge tries to setup the HW.
4683+	 */
4684+	.mode_set = meson_encoder_dsi_mode_set,
4685+	.atomic_enable = meson_encoder_dsi_atomic_enable,
4686+	.atomic_disable	= meson_encoder_dsi_atomic_disable,
4687+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
4688+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
4689+	.atomic_reset = drm_atomic_helper_bridge_reset,
4690+};
4691+
4692+int meson_encoder_dsi_init(struct meson_drm *priv)
4693+{
4694+	struct meson_encoder_dsi *meson_encoder_dsi;
4695+	struct device_node *remote;
4696+	int ret;
4697+
4698+	meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
4699+	if (!meson_encoder_dsi)
4700+		return -ENOMEM;
4701+
4702+	/* DSI Transceiver Bridge */
4703+	remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
4704+	if (!remote) {
4705+		dev_err(priv->dev, "DSI transceiver device is disabled");
4706+		return 0;
4707+	}
4708+
4709+	meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote);
4710+	if (!meson_encoder_dsi->next_bridge) {
4711+		dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n");
4712+		return -EPROBE_DEFER;
4713+	}
4714+
4715+	/* DSI Encoder Bridge */
4716+	meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
4717+	meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
4718+	meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
4719+
4720+	drm_bridge_add(&meson_encoder_dsi->bridge);
4721+
4722+	meson_encoder_dsi->priv = priv;
4723+
4724+	/* Encoder */
4725+	ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder,
4726+				      DRM_MODE_ENCODER_DSI);
4727+	if (ret) {
4728+		dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret);
4729+		return ret;
4730+	}
4731+
4732+	meson_encoder_dsi->encoder.possible_crtcs = BIT(0);
4733+
4734+	/* Attach DSI Encoder Bridge to Encoder */
4735+	ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0);
4736+	if (ret) {
4737+		dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
4738+		return ret;
4739+	}
4740+
4741+	/*
4742+	 * We should have now in place:
4743+	 * encoder->[dsi encoder bridge]->[dw-mipi-dsi bridge]->[panel bridge]->[panel]
4744+	 */
4745+
4746+	dev_dbg(priv->dev, "DSI encoder initialized\n");
4747+
4748+	return 0;
4749+}
4750diff -Naur a/drivers/gpu/drm/meson/meson_encoder_dsi.h b/drivers/gpu/drm/meson/meson_encoder_dsi.h
4751--- a/drivers/gpu/drm/meson/meson_encoder_dsi.h	1970-01-01 08:00:00.000000000 +0800
4752+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.h	2023-02-23 17:02:04.959751069 +0800
4753@@ -0,0 +1,12 @@
4754+/* SPDX-License-Identifier: GPL-2.0-or-later */
4755+/*
4756+ * Copyright (C) 2021 BayLibre, SAS
4757+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4758+ */
4759+
4760+#ifndef __MESON_ENCODER_DSI_H
4761+#define __MESON_ENCODER_DSI_H
4762+
4763+int meson_encoder_dsi_init(struct meson_drm *priv);
4764+
4765+#endif /* __MESON_ENCODER_DSI_H */
4766diff -Naur a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
4767--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c	1970-01-01 08:00:00.000000000 +0800
4768+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c	2023-02-23 17:02:04.959751069 +0800
4769@@ -0,0 +1,455 @@
4770+// SPDX-License-Identifier: GPL-2.0-or-later
4771+/*
4772+ * Copyright (C) 2016 BayLibre, SAS
4773+ * Author: Neil Armstrong <narmstrong@baylibre.com>
4774+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
4775+ */
4776+
4777+#include <linux/clk.h>
4778+#include <linux/component.h>
4779+#include <linux/kernel.h>
4780+#include <linux/module.h>
4781+#include <linux/of_device.h>
4782+#include <linux/of_graph.h>
4783+#include <linux/regulator/consumer.h>
4784+#include <linux/reset.h>
4785+
4786+#include <media/cec-notifier.h>
4787+
4788+#include <drm/drm_atomic_helper.h>
4789+#include <drm/drm_bridge.h>
4790+#include <drm/drm_bridge_connector.h>
4791+#include <drm/drm_device.h>
4792+#include <drm/drm_edid.h>
4793+#include <drm/drm_probe_helper.h>
4794+#include <drm/drm_simple_kms_helper.h>
4795+
4796+#include <linux/media-bus-format.h>
4797+#include <linux/videodev2.h>
4798+
4799+#include "meson_drv.h"
4800+#include "meson_registers.h"
4801+#include "meson_vclk.h"
4802+#include "meson_venc.h"
4803+#include "meson_encoder_hdmi.h"
4804+
4805+struct meson_encoder_hdmi {
4806+	struct drm_encoder encoder;
4807+	struct drm_bridge bridge;
4808+	struct drm_bridge *next_bridge;
4809+	struct drm_connector *connector;
4810+	struct meson_drm *priv;
4811+	unsigned long output_bus_fmt;
4812+	struct cec_notifier *cec_notifier;
4813+};
4814+
4815+#define bridge_to_meson_encoder_hdmi(x) \
4816+	container_of(x, struct meson_encoder_hdmi, bridge)
4817+
4818+static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
4819+				     enum drm_bridge_attach_flags flags)
4820+{
4821+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
4822+
4823+	return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
4824+				 &encoder_hdmi->bridge, flags);
4825+}
4826+
4827+static void meson_encoder_hdmi_detach(struct drm_bridge *bridge)
4828+{
4829+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
4830+
4831+	cec_notifier_conn_unregister(encoder_hdmi->cec_notifier);
4832+	encoder_hdmi->cec_notifier = NULL;
4833+}
4834+
4835+static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
4836+					const struct drm_display_mode *mode)
4837+{
4838+	struct meson_drm *priv = encoder_hdmi->priv;
4839+	int vic = drm_match_cea_mode(mode);
4840+	unsigned int phy_freq;
4841+	unsigned int vclk_freq;
4842+	unsigned int venc_freq;
4843+	unsigned int hdmi_freq;
4844+
4845+	vclk_freq = mode->clock;
4846+
4847+	/* For 420, pixel clock is half unlike venc clock */
4848+	if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
4849+		vclk_freq /= 2;
4850+
4851+	/* TMDS clock is pixel_clock * 10 */
4852+	phy_freq = vclk_freq * 10;
4853+
4854+	if (!vic) {
4855+		meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
4856+				 vclk_freq, vclk_freq, vclk_freq, false);
4857+		return;
4858+	}
4859+
4860+	/* 480i/576i needs global pixel doubling */
4861+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4862+		vclk_freq *= 2;
4863+
4864+	venc_freq = vclk_freq;
4865+	hdmi_freq = vclk_freq;
4866+
4867+	/* VENC double pixels for 1080i, 720p and YUV420 modes */
4868+	if (meson_venc_hdmi_venc_repeat(vic) ||
4869+	    encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
4870+		venc_freq *= 2;
4871+
4872+	vclk_freq = max(venc_freq, hdmi_freq);
4873+
4874+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4875+		venc_freq /= 2;
4876+
4877+	dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
4878+		phy_freq, vclk_freq, venc_freq, hdmi_freq,
4879+		priv->venc.hdmi_use_enci);
4880+
4881+	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
4882+			 venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
4883+}
4884+
4885+static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge,
4886+					const struct drm_display_info *display_info,
4887+					const struct drm_display_mode *mode)
4888+{
4889+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
4890+	struct meson_drm *priv = encoder_hdmi->priv;
4891+	bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
4892+	unsigned int phy_freq;
4893+	unsigned int vclk_freq;
4894+	unsigned int venc_freq;
4895+	unsigned int hdmi_freq;
4896+	int vic = drm_match_cea_mode(mode);
4897+	enum drm_mode_status status;
4898+
4899+	dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
4900+
4901+	/* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
4902+	if (display_info->max_tmds_clock &&
4903+	    mode->clock > display_info->max_tmds_clock &&
4904+	    !drm_mode_is_420_only(display_info, mode) &&
4905+	    !drm_mode_is_420_also(display_info, mode))
4906+		return MODE_BAD;
4907+
4908+	/* Check against non-VIC supported modes */
4909+	if (!vic) {
4910+		status = meson_venc_hdmi_supported_mode(mode);
4911+		if (status != MODE_OK)
4912+			return status;
4913+
4914+		return meson_vclk_dmt_supported_freq(priv, mode->clock);
4915+	/* Check against supported VIC modes */
4916+	} else if (!meson_venc_hdmi_supported_vic(vic))
4917+		return MODE_BAD;
4918+
4919+	vclk_freq = mode->clock;
4920+
4921+	/* For 420, pixel clock is half unlike venc clock */
4922+	if (drm_mode_is_420_only(display_info, mode) ||
4923+	    (!is_hdmi2_sink &&
4924+	     drm_mode_is_420_also(display_info, mode)))
4925+		vclk_freq /= 2;
4926+
4927+	/* TMDS clock is pixel_clock * 10 */
4928+	phy_freq = vclk_freq * 10;
4929+
4930+	/* 480i/576i needs global pixel doubling */
4931+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4932+		vclk_freq *= 2;
4933+
4934+	venc_freq = vclk_freq;
4935+	hdmi_freq = vclk_freq;
4936+
4937+	/* VENC double pixels for 1080i, 720p and YUV420 modes */
4938+	if (meson_venc_hdmi_venc_repeat(vic) ||
4939+	    drm_mode_is_420_only(display_info, mode) ||
4940+	    (!is_hdmi2_sink &&
4941+	     drm_mode_is_420_also(display_info, mode)))
4942+		venc_freq *= 2;
4943+
4944+	vclk_freq = max(venc_freq, hdmi_freq);
4945+
4946+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
4947+		venc_freq /= 2;
4948+
4949+	dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
4950+		__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
4951+
4952+	return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
4953+}
4954+
4955+static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
4956+					     struct drm_bridge_state *bridge_state)
4957+{
4958+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
4959+	struct drm_atomic_state *state = bridge_state->base.state;
4960+	unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
4961+	struct meson_drm *priv = encoder_hdmi->priv;
4962+	struct drm_connector_state *conn_state;
4963+	const struct drm_display_mode *mode;
4964+	struct drm_crtc_state *crtc_state;
4965+	struct drm_connector *connector;
4966+	bool yuv420_mode = false;
4967+	int vic;
4968+
4969+	connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
4970+	if (WARN_ON(!connector))
4971+		return;
4972+
4973+	conn_state = drm_atomic_get_new_connector_state(state, connector);
4974+	if (WARN_ON(!conn_state))
4975+		return;
4976+
4977+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
4978+	if (WARN_ON(!crtc_state))
4979+		return;
4980+
4981+	mode = &crtc_state->adjusted_mode;
4982+
4983+	vic = drm_match_cea_mode(mode);
4984+
4985+	dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic);
4986+
4987+	if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
4988+		ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
4989+		yuv420_mode = true;
4990+	} else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16) {
4991+		ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
4992+	}
4993+
4994+	/* VENC + VENC-DVI Mode setup */
4995+	meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
4996+
4997+	/* VCLK Set clock */
4998+	meson_encoder_hdmi_set_vclk(encoder_hdmi, mode);
4999+
5000+	if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
5001+		/* Setup YUV420 to HDMI-TX, no 10bit dithering */
5002+		writel_relaxed(2 | (2 << 2),
5003+			       priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
5004+	else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16)
5005+		/* Setup YUV422 to HDMI-TX, no 10bit dithering */
5006+		writel_relaxed(1 | (2 << 2),
5007+				priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
5008+	else
5009+		/* Setup YUV444 to HDMI-TX, no 10bit dithering */
5010+		writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
5011+
5012+	dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
5013+
5014+	if (priv->venc.hdmi_use_enci)
5015+		writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
5016+	else
5017+		writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
5018+}
5019+
5020+static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
5021+					     struct drm_bridge_state *bridge_state)
5022+{
5023+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
5024+	struct meson_drm *priv = encoder_hdmi->priv;
5025+
5026+	writel_bits_relaxed(0x3, 0,
5027+			    priv->io_base + _REG(VPU_HDMI_SETTING));
5028+
5029+	writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
5030+	writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
5031+}
5032+
5033+static const u32 meson_encoder_hdmi_out_bus_fmts[] = {
5034+	MEDIA_BUS_FMT_YUV8_1X24,
5035+	MEDIA_BUS_FMT_UYVY8_1X16,
5036+	MEDIA_BUS_FMT_UYYVYY8_0_5X24,
5037+};
5038+
5039+static u32 *
5040+meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge,
5041+					struct drm_bridge_state *bridge_state,
5042+					struct drm_crtc_state *crtc_state,
5043+					struct drm_connector_state *conn_state,
5044+					u32 output_fmt,
5045+					unsigned int *num_input_fmts)
5046+{
5047+	u32 *input_fmts = NULL;
5048+	int i;
5049+
5050+	*num_input_fmts = 0;
5051+
5052+	for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) {
5053+		if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) {
5054+			*num_input_fmts = 1;
5055+			input_fmts = kcalloc(*num_input_fmts,
5056+					     sizeof(*input_fmts),
5057+					     GFP_KERNEL);
5058+			if (!input_fmts)
5059+				return NULL;
5060+
5061+			input_fmts[0] = output_fmt;
5062+
5063+			break;
5064+		}
5065+	}
5066+
5067+	return input_fmts;
5068+}
5069+
5070+static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge,
5071+					struct drm_bridge_state *bridge_state,
5072+					struct drm_crtc_state *crtc_state,
5073+					struct drm_connector_state *conn_state)
5074+{
5075+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
5076+	struct drm_connector_state *old_conn_state =
5077+		drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector);
5078+	struct meson_drm *priv = encoder_hdmi->priv;
5079+
5080+	encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
5081+
5082+	dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt);
5083+
5084+	if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state))
5085+		crtc_state->mode_changed = true;
5086+
5087+	return 0;
5088+}
5089+
5090+static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
5091+					  enum drm_connector_status status)
5092+{
5093+	struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
5094+	struct edid *edid;
5095+
5096+	if (!encoder_hdmi->cec_notifier)
5097+		return;
5098+
5099+	if (status == connector_status_connected) {
5100+		edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
5101+		if (!edid)
5102+			return;
5103+
5104+		cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
5105+	} else
5106+		cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
5107+}
5108+
5109+static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = {
5110+	.attach = meson_encoder_hdmi_attach,
5111+	.detach = meson_encoder_hdmi_detach,
5112+	.mode_valid = meson_encoder_hdmi_mode_valid,
5113+	//.hpd_notify = meson_encoder_hdmi_hpd_notify, // UNIONMAN del
5114+	.atomic_enable = meson_encoder_hdmi_atomic_enable,
5115+	.atomic_disable = meson_encoder_hdmi_atomic_disable,
5116+	.atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts,
5117+	.atomic_check = meson_encoder_hdmi_atomic_check,
5118+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
5119+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
5120+	.atomic_reset = drm_atomic_helper_bridge_reset,
5121+};
5122+
5123+int meson_encoder_hdmi_init(struct meson_drm *priv)
5124+{
5125+	struct meson_encoder_hdmi *meson_encoder_hdmi;
5126+	struct platform_device *pdev;
5127+	struct device_node *remote;
5128+	int ret;
5129+
5130+	meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
5131+	if (!meson_encoder_hdmi)
5132+		return -ENOMEM;
5133+
5134+	/* HDMI Transceiver Bridge */
5135+	remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
5136+	if (!remote) {
5137+		dev_err(priv->dev, "HDMI transceiver device is disabled");
5138+		return 0;
5139+	}
5140+
5141+	meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
5142+	if (!meson_encoder_hdmi->next_bridge) {
5143+		dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
5144+		return -EPROBE_DEFER;
5145+	}
5146+
5147+	/* HDMI Encoder Bridge */
5148+	meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
5149+	meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
5150+	meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
5151+	meson_encoder_hdmi->bridge.interlace_allowed = true;
5152+
5153+	drm_bridge_add(&meson_encoder_hdmi->bridge);
5154+
5155+	meson_encoder_hdmi->priv = priv;
5156+
5157+	/* Encoder */
5158+	ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
5159+				      DRM_MODE_ENCODER_TMDS);
5160+	if (ret) {
5161+		dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
5162+		return ret;
5163+	}
5164+
5165+	meson_encoder_hdmi->encoder.possible_crtcs = BIT(0);
5166+
5167+	/* Attach HDMI Encoder Bridge to Encoder */
5168+	ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
5169+				0/*UNIONMAN modify: DRM_BRIDGE_ATTACH_NO_CONNECTOR*/);
5170+	if (ret) {
5171+		dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
5172+		return ret;
5173+	}
5174+
5175+#if 0 // UNIONMAN del: use connector created by dw_hdmi instead.
5176+	/* Initialize & attach Bridge Connector */
5177+	meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
5178+							&meson_encoder_hdmi->encoder);
5179+	if (IS_ERR(meson_encoder_hdmi->connector)) {
5180+		dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
5181+		return PTR_ERR(meson_encoder_hdmi->connector);
5182+	}
5183+	drm_connector_attach_encoder(meson_encoder_hdmi->connector,
5184+				     &meson_encoder_hdmi->encoder);
5185+
5186+	/*
5187+	 * We should have now in place:
5188+	 * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[display connector bridge]->[display connector]
5189+	 */
5190+
5191+	/*
5192+	 * drm_connector_attach_max_bpc_property() requires the
5193+	 * connector to have a state.
5194+	 */
5195+	drm_atomic_helper_connector_reset(meson_encoder_hdmi->connector);
5196+
5197+	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
5198+	    meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
5199+	    meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
5200+		drm_connector_attach_hdr_output_metadata_property(meson_encoder_hdmi->connector);
5201+
5202+	drm_connector_attach_max_bpc_property(meson_encoder_hdmi->connector, 8, 8);
5203+
5204+	/* Handle this here until handled by drm_bridge_connector_init() */
5205+	meson_encoder_hdmi->connector->ycbcr_420_allowed = true;
5206+
5207+	pdev = of_find_device_by_node(remote);
5208+	if (pdev) {
5209+		struct cec_connector_info conn_info;
5210+		struct cec_notifier *notifier;
5211+
5212+		cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector);
5213+
5214+		notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info);
5215+		if (!notifier)
5216+			return -ENOMEM;
5217+
5218+		meson_encoder_hdmi->cec_notifier = notifier;
5219+	}
5220+#endif
5221+	dev_dbg(priv->dev, "HDMI encoder initialized\n");
5222+
5223+	return 0;
5224+}
5225diff -Naur a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
5226--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h	1970-01-01 08:00:00.000000000 +0800
5227+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h	2023-02-23 17:02:04.959751069 +0800
5228@@ -0,0 +1,12 @@
5229+/* SPDX-License-Identifier: GPL-2.0-or-later */
5230+/*
5231+ * Copyright (C) 2021 BayLibre, SAS
5232+ * Author: Neil Armstrong <narmstrong@baylibre.com>
5233+ */
5234+
5235+#ifndef __MESON_ENCODER_HDMI_H
5236+#define __MESON_ENCODER_HDMI_H
5237+
5238+int meson_encoder_hdmi_init(struct meson_drm *priv);
5239+
5240+#endif /* __MESON_ENCODER_HDMI_H */
5241diff -Naur a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
5242--- a/drivers/gpu/drm/meson/meson_overlay.c	2022-12-19 17:13:12.677518989 +0800
5243+++ b/drivers/gpu/drm/meson/meson_overlay.c	2023-02-23 17:02:04.959751069 +0800
5244@@ -10,10 +10,10 @@
5245 #include <drm/drm_atomic.h>
5246 #include <drm/drm_atomic_helper.h>
5247 #include <drm/drm_device.h>
5248+#include <drm/drm_fb_cma_helper.h>
5249 #include <drm/drm_fourcc.h>
5250-#include <drm/drm_plane_helper.h>
5251 #include <drm/drm_gem_cma_helper.h>
5252-#include <drm/drm_fb_cma_helper.h>
5253+#include <drm/drm_plane_helper.h>
5254 #include <drm/drm_gem_framebuffer_helper.h>
5255
5256 #include "meson_overlay.h"
5257@@ -167,16 +167,20 @@
5258 static int meson_overlay_atomic_check(struct drm_plane *plane,
5259 				      struct drm_plane_state *state)
5260 {
5261+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state->state,
5262+										 plane);
5263 	struct drm_crtc_state *crtc_state;
5264
5265-	if (!state->crtc)
5266+	if (!new_plane_state->crtc)
5267 		return 0;
5268
5269-	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
5270+	crtc_state = drm_atomic_get_crtc_state(state->state,
5271+					       new_plane_state->crtc);
5272 	if (IS_ERR(crtc_state))
5273 		return PTR_ERR(crtc_state);
5274
5275-	return drm_atomic_helper_check_plane_state(state, crtc_state,
5276+	return drm_atomic_helper_check_plane_state(new_plane_state,
5277+						   crtc_state,
5278 						   FRAC_16_16(1, 5),
5279 						   FRAC_16_16(5, 1),
5280 						   true, true);
5281@@ -464,11 +468,12 @@
5282 }
5283
5284 static void meson_overlay_atomic_update(struct drm_plane *plane,
5285-					struct drm_plane_state *old_state)
5286+					struct drm_plane_state *state)
5287 {
5288 	struct meson_overlay *meson_overlay = to_meson_overlay(plane);
5289-	struct drm_plane_state *state = plane->state;
5290-	struct drm_framebuffer *fb = state->fb;
5291+	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state->state,
5292+									   plane);
5293+	struct drm_framebuffer *fb = new_state->fb;
5294 	struct meson_drm *priv = meson_overlay->priv;
5295 	struct drm_gem_cma_object *gem;
5296 	unsigned long flags;
5297@@ -476,7 +481,7 @@
5298
5299 	DRM_DEBUG_DRIVER("\n");
5300
5301-	interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
5302+	interlace_mode = new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
5303
5304 	spin_lock_irqsave(&priv->drm->event_lock, flags);
5305
5306@@ -717,7 +722,7 @@
5307 }
5308
5309 static void meson_overlay_atomic_disable(struct drm_plane *plane,
5310-				       struct drm_plane_state *old_state)
5311+				       struct drm_plane_state *state)
5312 {
5313 	struct meson_overlay *meson_overlay = to_meson_overlay(plane);
5314 	struct meson_drm *priv = meson_overlay->priv;
5315diff -Naur a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
5316--- a/drivers/gpu/drm/meson/meson_plane.c	2022-12-19 17:13:12.677518989 +0800
5317+++ b/drivers/gpu/drm/meson/meson_plane.c	2023-02-23 17:02:04.959751069 +0800
5318@@ -73,12 +73,15 @@
5319 static int meson_plane_atomic_check(struct drm_plane *plane,
5320 				    struct drm_plane_state *state)
5321 {
5322+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state->state,
5323+										 plane);
5324 	struct drm_crtc_state *crtc_state;
5325
5326-	if (!state->crtc)
5327+	if (!new_plane_state->crtc)
5328 		return 0;
5329
5330-	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
5331+	crtc_state = drm_atomic_get_crtc_state(state->state,
5332+					       new_plane_state->crtc);
5333 	if (IS_ERR(crtc_state))
5334 		return PTR_ERR(crtc_state);
5335
5336@@ -87,7 +90,8 @@
5337 	 * - Upscaling up to 5x, vertical and horizontal
5338 	 * - Final coordinates must match crtc size
5339 	 */
5340-	return drm_atomic_helper_check_plane_state(state, crtc_state,
5341+	return drm_atomic_helper_check_plane_state(new_plane_state,
5342+						   crtc_state,
5343 						   FRAC_16_16(1, 5),
5344 						   DRM_PLANE_HELPER_NO_SCALING,
5345 						   false, true);
5346@@ -126,13 +130,14 @@
5347 }
5348
5349 static void meson_plane_atomic_update(struct drm_plane *plane,
5350-				      struct drm_plane_state *old_state)
5351+				      struct drm_plane_state *state)
5352 {
5353 	struct meson_plane *meson_plane = to_meson_plane(plane);
5354-	struct drm_plane_state *state = plane->state;
5355-	struct drm_rect dest = drm_plane_state_dest(state);
5356+	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state->state,
5357+									   plane);
5358+	struct drm_rect dest = drm_plane_state_dest(new_state);
5359 	struct meson_drm *priv = meson_plane->priv;
5360-	struct drm_framebuffer *fb = state->fb;
5361+	struct drm_framebuffer *fb = new_state->fb;
5362 	struct drm_gem_cma_object *gem;
5363 	unsigned long flags;
5364 	int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
5365@@ -245,7 +250,7 @@
5366 	hf_bank_len = 4;
5367 	vf_bank_len = 4;
5368
5369-	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
5370+	if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
5371 		vsc_bot_rcv_num = 6;
5372 		vsc_bot_rpt_p0_num = 2;
5373 	}
5374@@ -255,10 +260,10 @@
5375 	hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
5376 	vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
5377
5378-	src_w = fixed16_to_int(state->src_w);
5379-	src_h = fixed16_to_int(state->src_h);
5380-	dst_w = state->crtc_w;
5381-	dst_h = state->crtc_h;
5382+	src_w = fixed16_to_int(new_state->src_w);
5383+	src_h = fixed16_to_int(new_state->src_h);
5384+	dst_w = new_state->crtc_w;
5385+	dst_h = new_state->crtc_h;
5386
5387 	/*
5388 	 * When the output is interlaced, the OSD must switch between
5389@@ -267,7 +272,7 @@
5390 	 * But the vertical scaler can provide such funtionnality if
5391 	 * is configured for 2:1 scaling with interlace options enabled.
5392 	 */
5393-	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
5394+	if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
5395 		dest.y1 /= 2;
5396 		dest.y2 /= 2;
5397 		dst_h /= 2;
5398@@ -276,7 +281,7 @@
5399 	hf_phase_step = ((src_w << 18) / dst_w) << 6;
5400 	vf_phase_step = (src_h << 20) / dst_h;
5401
5402-	if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
5403+	if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
5404 		bot_ini_phase = ((vf_phase_step / 2) >> 4);
5405 	else
5406 		bot_ini_phase = 0;
5407@@ -308,7 +313,7 @@
5408 					VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
5409 					VSC_VERTICAL_SCALER_EN;
5410
5411-		if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
5412+		if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
5413 			priv->viu.osd_sc_v_ctrl0 |=
5414 					VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
5415 					VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
5416@@ -343,11 +348,11 @@
5417 	 * e.g. +30x1920 would be (1919 << 16) | 30
5418 	 */
5419 	priv->viu.osd1_blk0_cfg[1] =
5420-				((fixed16_to_int(state->src.x2) - 1) << 16) |
5421-				fixed16_to_int(state->src.x1);
5422+				((fixed16_to_int(new_state->src.x2) - 1) << 16) |
5423+				fixed16_to_int(new_state->src.x1);
5424 	priv->viu.osd1_blk0_cfg[2] =
5425-				((fixed16_to_int(state->src.y2) - 1) << 16) |
5426-				fixed16_to_int(state->src.y1);
5427+				((fixed16_to_int(new_state->src.y2) - 1) << 16) |
5428+				fixed16_to_int(new_state->src.y1);
5429 	priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
5430 	priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
5431
5432@@ -355,7 +360,6 @@
5433 		priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
5434 		priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
5435 		priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
5436-		priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
5437 	}
5438
5439 	/* Update Canvas with buffer address */
5440@@ -391,7 +395,7 @@
5441 }
5442
5443 static void meson_plane_atomic_disable(struct drm_plane *plane,
5444-				       struct drm_plane_state *old_state)
5445+				       struct drm_plane_state *state)
5446 {
5447 	struct meson_plane *meson_plane = to_meson_plane(plane);
5448 	struct meson_drm *priv = meson_plane->priv;
5449diff -Naur a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
5450--- a/drivers/gpu/drm/meson/meson_registers.h	2022-12-19 17:13:12.677518989 +0800
5451+++ b/drivers/gpu/drm/meson/meson_registers.h	2023-02-23 17:02:04.959751069 +0800
5452@@ -812,6 +812,7 @@
5453 #define VENC_STATA 0x1b6d
5454 #define VENC_INTCTRL 0x1b6e
5455 #define		VENC_INTCTRL_ENCI_LNRST_INT_EN  BIT(1)
5456+#define		VENC_INTCTRL_ENCP_LNRST_INT_EN  BIT(9)
5457 #define VENC_INTFLAG 0x1b6f
5458 #define VENC_VIDEO_TST_EN 0x1b70
5459 #define VENC_VIDEO_TST_MDSEL 0x1b71
5460@@ -1192,7 +1193,11 @@
5461 #define ENCL_VIDEO_PB_OFFST 0x1ca5
5462 #define ENCL_VIDEO_PR_OFFST 0x1ca6
5463 #define ENCL_VIDEO_MODE 0x1ca7
5464+#define		ENCL_PX_LN_CNT_SHADOW_EN	BIT(15)
5465 #define ENCL_VIDEO_MODE_ADV 0x1ca8
5466+#define		ENCL_VIDEO_MODE_ADV_VFIFO_EN	BIT(3)
5467+#define		ENCL_VIDEO_MODE_ADV_GAIN_HDTV	BIT(4)
5468+#define		ENCL_SEL_GAMMA_RGB_IN		BIT(10)
5469 #define ENCL_DBG_PX_RST 0x1ca9
5470 #define ENCL_DBG_LN_RST 0x1caa
5471 #define ENCL_DBG_PX_INT 0x1cab
5472@@ -1219,11 +1224,14 @@
5473 #define ENCL_VIDEO_VOFFST 0x1cc0
5474 #define ENCL_VIDEO_RGB_CTRL 0x1cc1
5475 #define ENCL_VIDEO_FILT_CTRL 0x1cc2
5476+#define		ENCL_VIDEO_FILT_CTRL_BYPASS_FILTER	BIT(12)
5477 #define ENCL_VIDEO_OFLD_VPEQ_OFST 0x1cc3
5478 #define ENCL_VIDEO_OFLD_VOAV_OFST 0x1cc4
5479 #define ENCL_VIDEO_MATRIX_CB 0x1cc5
5480 #define ENCL_VIDEO_MATRIX_CR 0x1cc6
5481 #define ENCL_VIDEO_RGBIN_CTRL 0x1cc7
5482+#define		ENCL_VIDEO_RGBIN_RGB	BIT(0)
5483+#define		ENCL_VIDEO_RGBIN_ZBLK	BIT(1)
5484 #define ENCL_MAX_LINE_SWITCH_POINT 0x1cc8
5485 #define ENCL_DACSEL_0 0x1cc9
5486 #define ENCL_DACSEL_1 0x1cca
5487@@ -1300,13 +1308,28 @@
5488 #define RDMA_STATUS2 0x1116
5489 #define RDMA_STATUS3 0x1117
5490 #define L_GAMMA_CNTL_PORT 0x1400
5491+#define		L_GAMMA_CNTL_PORT_VCOM_POL	BIT(7)	/* RW */
5492+#define		L_GAMMA_CNTL_PORT_RVS_OUT	BIT(6)	/* RW */
5493+#define		L_GAMMA_CNTL_PORT_ADR_RDY	BIT(5)	/* Read Only */
5494+#define		L_GAMMA_CNTL_PORT_WR_RDY	BIT(4)	/* Read Only */
5495+#define		L_GAMMA_CNTL_PORT_RD_RDY	BIT(3)	/* Read Only */
5496+#define		L_GAMMA_CNTL_PORT_TR		BIT(2)	/* RW */
5497+#define		L_GAMMA_CNTL_PORT_SET		BIT(1)	/* RW */
5498+#define		L_GAMMA_CNTL_PORT_EN		BIT(0)	/* RW */
5499 #define L_GAMMA_DATA_PORT 0x1401
5500 #define L_GAMMA_ADDR_PORT 0x1402
5501+#define		L_GAMMA_ADDR_PORT_RD		BIT(12)
5502+#define		L_GAMMA_ADDR_PORT_AUTO_INC	BIT(11)
5503+#define		L_GAMMA_ADDR_PORT_SEL_R		BIT(10)
5504+#define		L_GAMMA_ADDR_PORT_SEL_G		BIT(9)
5505+#define		L_GAMMA_ADDR_PORT_SEL_B		BIT(8)
5506+#define		L_GAMMA_ADDR_PORT_ADDR		GENMASK(7, 0)
5507 #define L_GAMMA_VCOM_HSWITCH_ADDR 0x1403
5508 #define L_RGB_BASE_ADDR 0x1405
5509 #define L_RGB_COEFF_ADDR 0x1406
5510 #define L_POL_CNTL_ADDR 0x1407
5511 #define L_DITH_CNTL_ADDR 0x1408
5512+#define		L_DITH_CNTL_DITH10_EN	BIT(10)
5513 #define L_GAMMA_PROBE_CTRL 0x1409
5514 #define L_GAMMA_PROBE_COLOR_L 0x140a
5515 #define L_GAMMA_PROBE_COLOR_H 0x140b
5516@@ -1363,6 +1386,8 @@
5517 #define L_LCD_PWM1_HI_ADDR 0x143f
5518 #define L_INV_CNT_ADDR 0x1440
5519 #define L_TCON_MISC_SEL_ADDR 0x1441
5520+#define		L_TCON_MISC_SEL_STV1	BIT(4)
5521+#define		L_TCON_MISC_SEL_STV2	BIT(5)
5522 #define L_DUAL_PORT_CNTL_ADDR 0x1442
5523 #define MLVDS_CLK_CTL1_HI 0x1443
5524 #define MLVDS_CLK_CTL1_LO 0x1444
5525diff -Naur a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
5526--- a/drivers/gpu/drm/meson/meson_vclk.c	2022-12-19 17:13:12.677518989 +0800
5527+++ b/drivers/gpu/drm/meson/meson_vclk.c	2023-02-23 17:02:04.959751069 +0800
5528@@ -55,6 +55,8 @@
5529 #define VCLK2_DIV_MASK		0xff
5530 #define VCLK2_DIV_EN		BIT(16)
5531 #define VCLK2_DIV_RESET		BIT(17)
5532+#define CTS_ENCL_SEL_MASK	(0xf << 12)
5533+#define CTS_ENCL_SEL_SHIFT	12
5534 #define CTS_VDAC_SEL_MASK	(0xf << 28)
5535 #define CTS_VDAC_SEL_SHIFT	28
5536 #define HHI_VIID_CLK_CNTL	0x12c /* 0x4b offset in data sheet */
5537@@ -83,6 +85,7 @@
5538 #define VCLK_DIV12_EN		BIT(4)
5539 #define HHI_VID_CLK_CNTL2	0x194 /* 0x65 offset in data sheet */
5540 #define CTS_ENCI_EN		BIT(0)
5541+#define CTS_ENCL_EN		BIT(3)
5542 #define CTS_ENCP_EN		BIT(2)
5543 #define CTS_VDAC_EN		BIT(4)
5544 #define HDMI_TX_PIXEL_EN	BIT(5)
5545@@ -131,7 +134,7 @@
5546 	VID_PLL_DIV_15,
5547 };
5548
5549-void meson_vid_pll_set(struct meson_drm *priv, unsigned int div)
5550+static void meson_vid_pll_set(struct meson_drm *priv, unsigned int div)
5551 {
5552 	unsigned int shift_val = 0;
5553 	unsigned int shift_sel = 0;
5554@@ -357,6 +360,8 @@
5555 	MESON_VCLK_HDMI_594000,
5556 /* 2970 /1 /1 /1 /5 /1  => /1 /2 */
5557 	MESON_VCLK_HDMI_594000_YUV420,
5558+/* 4830 /2 /1 /2 /5 /1  => /1 /1 */
5559+	MESON_VCLK_HDMI_241500,
5560 };
5561
5562 struct meson_vclk_params {
5563@@ -467,6 +472,18 @@
5564 		.vid_pll_div = VID_PLL_DIV_5,
5565 		.vclk_div = 1,
5566 	},
5567+	[MESON_VCLK_HDMI_241500] = {
5568+		.pll_freq = 4830000,
5569+		.phy_freq = 2415000,
5570+		.venc_freq = 241500,
5571+		.vclk_freq = 241500,
5572+		.pixel_freq = 241500,
5573+		.pll_od1 = 2,
5574+		.pll_od2 = 1,
5575+		.pll_od3 = 2,
5576+		.vid_pll_div = VID_PLL_DIV_5,
5577+		.vclk_div = 1,
5578+	},
5579 	{ /* sentinel */ },
5580 };
5581
5582@@ -487,9 +504,9 @@
5583 	return 0;
5584 }
5585
5586-void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
5587-			       unsigned int frac, unsigned int od1,
5588-			       unsigned int od2, unsigned int od3)
5589+static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
5590+				      unsigned int frac, unsigned int od1,
5591+				      unsigned int od2, unsigned int od3)
5592 {
5593 	unsigned int val;
5594
5595@@ -873,6 +890,10 @@
5596 			m = 0xf7;
5597 			frac = vic_alternate_clock ? 0x8148 : 0x10000;
5598 			break;
5599+		case 4830000:
5600+			m = 0xc9;
5601+			frac = 0xd560;
5602+			break;
5603 		}
5604
5605 		meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
5606@@ -1024,6 +1045,47 @@
5607 	regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
5608 }
5609
5610+static void meson_dsi_clock_config(struct meson_drm *priv, unsigned int freq)
5611+{
5612+	meson_hdmi_pll_generic_set(priv, freq * 10);
5613+
5614+	/* Setup vid_pll divider value /5 */
5615+	meson_vid_pll_set(priv, VID_PLL_DIV_5);
5616+
5617+	/* Disable VCLK2 */
5618+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
5619+
5620+	/* Setup the VCLK2 divider value /2 */
5621+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV, VCLK2_DIV_MASK, 2 - 1);
5622+
5623+	/* select vid_pll for vclk2 */
5624+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
5625+			   VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
5626+
5627+	/* enable vclk2 gate */
5628+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
5629+
5630+	/* select vclk2_div1 for encl */
5631+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
5632+			   CTS_ENCL_SEL_MASK, (8 << CTS_ENCL_SEL_SHIFT));
5633+
5634+	/* release vclk2_div_reset and enable vclk2_div */
5635+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV, VCLK2_DIV_EN | VCLK2_DIV_RESET,
5636+			   VCLK2_DIV_EN);
5637+
5638+	/* enable vclk2_div1 gate */
5639+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_DIV1_EN, VCLK2_DIV1_EN);
5640+
5641+	/* reset vclk2 */
5642+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_SOFT_RESET, VCLK2_SOFT_RESET);
5643+	regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_SOFT_RESET, 0);
5644+
5645+	/* enable encl_clk */
5646+	regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2, CTS_ENCL_EN, CTS_ENCL_EN);
5647+
5648+	usleep_range(10000, 11000);
5649+}
5650+
5651 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
5652 		      unsigned int phy_freq, unsigned int vclk_freq,
5653 		      unsigned int venc_freq, unsigned int dac_freq,
5654@@ -1050,6 +1112,9 @@
5655 		meson_vclk_set(priv, phy_freq, 0, 0, 0,
5656 			       VID_PLL_DIV_5, 2, 1, 1, false, false);
5657 		return;
5658+	} else if (target == MESON_VCLK_TARGET_DSI) {
5659+		meson_dsi_clock_config(priv, phy_freq);
5660+		return;
5661 	}
5662
5663 	hdmi_tx_div = vclk_freq / dac_freq;
5664diff -Naur a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
5665--- a/drivers/gpu/drm/meson/meson_vclk.h	2022-12-19 17:13:12.677518989 +0800
5666+++ b/drivers/gpu/drm/meson/meson_vclk.h	2023-02-23 17:02:04.959751069 +0800
5667@@ -17,6 +17,7 @@
5668 	MESON_VCLK_TARGET_CVBS = 0,
5669 	MESON_VCLK_TARGET_HDMI = 1,
5670 	MESON_VCLK_TARGET_DMT = 2,
5671+	MESON_VCLK_TARGET_DSI = 3,
5672 };
5673
5674 /* 27MHz is the CVBS Pixel Clock */
5675diff -Naur a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
5676--- a/drivers/gpu/drm/meson/meson_venc.c	2022-12-19 17:13:12.677518989 +0800
5677+++ b/drivers/gpu/drm/meson/meson_venc.c	2023-02-23 17:02:04.959751069 +0800
5678@@ -6,6 +6,7 @@
5679  */
5680
5681 #include <linux/export.h>
5682+#include <linux/iopoll.h>
5683
5684 #include <drm/drm_modes.h>
5685
5686@@ -45,7 +46,7 @@
5687  * The ENCI is designed for PAl or NTSC encoding and can go through the VDAC
5688  * directly for CVBS encoding or through the ENCI_DVI encoder for HDMI.
5689  * The ENCP is designed for Progressive encoding but can also generate
5690- * 1080i interlaced pixels, and was initialy desined to encode pixels for
5691+ * 1080i interlaced pixels, and was initially designed to encode pixels for
5692  * VDAC to output RGB ou YUV analog outputs.
5693  * It's output is only used through the ENCP_DVI encoder for HDMI.
5694  * The ENCL LVDS encoder is not implemented.
5695@@ -866,10 +867,11 @@
5696 			    DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
5697 		return MODE_BAD;
5698
5699-	if (mode->hdisplay < 640 || mode->hdisplay > 1920)
5700+	/* support resolutions higher than 1920x1080 */
5701+	if (mode->hdisplay < 640 || mode->hdisplay > 2560)
5702 		return MODE_BAD_HVALUE;
5703
5704-	if (mode->vdisplay < 480 || mode->vdisplay > 1200)
5705+	if (mode->vdisplay < 480 || mode->vdisplay > 1600)
5706 		return MODE_BAD_VVALUE;
5707
5708 	return MODE_OK;
5709@@ -890,8 +892,8 @@
5710 }
5711 EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
5712
5713-void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
5714-				   union meson_hdmi_venc_mode *dmt_mode)
5715+static void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
5716+					  union meson_hdmi_venc_mode *dmt_mode)
5717 {
5718 	memset(dmt_mode, 0, sizeof(*dmt_mode));
5719
5720@@ -1557,6 +1559,205 @@
5721 }
5722 EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
5723
5724+static unsigned short meson_encl_gamma_table[256] = {
5725+	0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
5726+	64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124,
5727+	128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188,
5728+	192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252,
5729+	256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316,
5730+	320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380,
5731+	384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444,
5732+	448, 452, 456, 460, 464, 468, 472, 476, 480, 484, 488, 492, 496, 500, 504, 508,
5733+	512, 516, 520, 524, 528, 532, 536, 540, 544, 548, 552, 556, 560, 564, 568, 572,
5734+	576, 580, 584, 588, 592, 596, 600, 604, 608, 612, 616, 620, 624, 628, 632, 636,
5735+	640, 644, 648, 652, 656, 660, 664, 668, 672, 676, 680, 684, 688, 692, 696, 700,
5736+	704, 708, 712, 716, 720, 724, 728, 732, 736, 740, 744, 748, 752, 756, 760, 764,
5737+	768, 772, 776, 780, 784, 788, 792, 796, 800, 804, 808, 812, 816, 820, 824, 828,
5738+	832, 836, 840, 844, 848, 852, 856, 860, 864, 868, 872, 876, 880, 884, 888, 892,
5739+	896, 900, 904, 908, 912, 916, 920, 924, 928, 932, 936, 940, 944, 948, 952, 956,
5740+	960, 964, 968, 972, 976, 980, 984, 988, 992, 996, 1000, 1004, 1008, 1012, 1016, 1020,
5741+};
5742+
5743+static void meson_encl_set_gamma_table(struct meson_drm *priv, u16 *data,
5744+				       u32 rgb_mask)
5745+{
5746+	int i, ret;
5747+	u32 reg;
5748+
5749+	writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, 0,
5750+			    priv->io_base + _REG(L_GAMMA_CNTL_PORT));
5751+
5752+	ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
5753+					 reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000);
5754+	if (ret)
5755+		pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__);
5756+
5757+	writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask |
5758+		       FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0),
5759+		       priv->io_base + _REG(L_GAMMA_ADDR_PORT));
5760+
5761+	for (i = 0; i < 256; i++) {
5762+		ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
5763+						 reg, reg & L_GAMMA_CNTL_PORT_WR_RDY,
5764+						 10, 10000);
5765+		if (ret)
5766+			pr_warn_once("%s: GAMMA WR_RDY timeout\n", __func__);
5767+
5768+		writel_relaxed(data[i], priv->io_base + _REG(L_GAMMA_DATA_PORT));
5769+	}
5770+
5771+	ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
5772+					 reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000);
5773+	if (ret)
5774+		pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__);
5775+
5776+	writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask |
5777+		       FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0x23),
5778+		       priv->io_base + _REG(L_GAMMA_ADDR_PORT));
5779+}
5780+
5781+void meson_encl_load_gamma(struct meson_drm *priv)
5782+{
5783+	meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_R);
5784+	meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_G);
5785+	meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_B);
5786+
5787+	writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, L_GAMMA_CNTL_PORT_EN,
5788+			    priv->io_base + _REG(L_GAMMA_CNTL_PORT));
5789+}
5790+
5791+void meson_venc_mipi_dsi_mode_set(struct meson_drm *priv,
5792+				  const struct drm_display_mode *mode)
5793+{
5794+	unsigned int max_pxcnt;
5795+	unsigned int max_lncnt;
5796+	unsigned int havon_begin;
5797+	unsigned int havon_end;
5798+	unsigned int vavon_bline;
5799+	unsigned int vavon_eline;
5800+	unsigned int hso_begin;
5801+	unsigned int hso_end;
5802+	unsigned int vso_begin;
5803+	unsigned int vso_end;
5804+	unsigned int vso_bline;
5805+	unsigned int vso_eline;
5806+
5807+	max_pxcnt = mode->htotal - 1;
5808+	max_lncnt = mode->vtotal - 1;
5809+	havon_begin = mode->htotal - mode->hsync_start;
5810+	havon_end = havon_begin + mode->hdisplay - 1;
5811+	vavon_bline = mode->vtotal - mode->vsync_start;
5812+	vavon_eline = vavon_bline + mode->vdisplay - 1;
5813+	hso_begin = 0;
5814+	hso_end = mode->hsync_end - mode->hsync_start;
5815+	vso_begin = 0;
5816+	vso_end = 0;
5817+	vso_bline = 0;
5818+	vso_eline = mode->vsync_end - mode->vsync_start;
5819+
5820+	meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCL);
5821+
5822+	writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
5823+
5824+	writel_relaxed(ENCL_PX_LN_CNT_SHADOW_EN, priv->io_base + _REG(ENCL_VIDEO_MODE));
5825+	writel_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN |
5826+		       ENCL_VIDEO_MODE_ADV_GAIN_HDTV |
5827+		       ENCL_SEL_GAMMA_RGB_IN, priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
5828+
5829+	writel_relaxed(ENCL_VIDEO_FILT_CTRL_BYPASS_FILTER,
5830+		       priv->io_base + _REG(ENCL_VIDEO_FILT_CTRL));
5831+	writel_relaxed(max_pxcnt, priv->io_base + _REG(ENCL_VIDEO_MAX_PXCNT));
5832+	writel_relaxed(max_lncnt, priv->io_base + _REG(ENCL_VIDEO_MAX_LNCNT));
5833+	writel_relaxed(havon_begin, priv->io_base + _REG(ENCL_VIDEO_HAVON_BEGIN));
5834+	writel_relaxed(havon_end, priv->io_base + _REG(ENCL_VIDEO_HAVON_END));
5835+	writel_relaxed(vavon_bline, priv->io_base + _REG(ENCL_VIDEO_VAVON_BLINE));
5836+	writel_relaxed(vavon_eline, priv->io_base + _REG(ENCL_VIDEO_VAVON_ELINE));
5837+
5838+	writel_relaxed(hso_begin, priv->io_base + _REG(ENCL_VIDEO_HSO_BEGIN));
5839+	writel_relaxed(hso_end, priv->io_base + _REG(ENCL_VIDEO_HSO_END));
5840+	writel_relaxed(vso_begin, priv->io_base + _REG(ENCL_VIDEO_VSO_BEGIN));
5841+	writel_relaxed(vso_end, priv->io_base + _REG(ENCL_VIDEO_VSO_END));
5842+	writel_relaxed(vso_bline, priv->io_base + _REG(ENCL_VIDEO_VSO_BLINE));
5843+	writel_relaxed(vso_eline, priv->io_base + _REG(ENCL_VIDEO_VSO_ELINE));
5844+	writel_relaxed(ENCL_VIDEO_RGBIN_RGB | ENCL_VIDEO_RGBIN_ZBLK,
5845+		       priv->io_base + _REG(ENCL_VIDEO_RGBIN_CTRL));
5846+
5847+	/* default black pattern */
5848+	writel_relaxed(0, priv->io_base + _REG(ENCL_TST_MDSEL));
5849+	writel_relaxed(0, priv->io_base + _REG(ENCL_TST_Y));
5850+	writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CB));
5851+	writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CR));
5852+	writel_relaxed(1, priv->io_base + _REG(ENCL_TST_EN));
5853+	writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, 0,
5854+			    priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
5855+
5856+	writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN));
5857+
5858+	writel_relaxed(0, priv->io_base + _REG(L_RGB_BASE_ADDR));
5859+	writel_relaxed(0x400, priv->io_base + _REG(L_RGB_COEFF_ADDR)); /* Magic value */
5860+
5861+	writel_relaxed(L_DITH_CNTL_DITH10_EN, priv->io_base + _REG(L_DITH_CNTL_ADDR));
5862+
5863+	/* DE signal for TTL */
5864+	writel_relaxed(havon_begin, priv->io_base + _REG(L_OEH_HS_ADDR));
5865+	writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEH_HE_ADDR));
5866+	writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEH_VS_ADDR));
5867+	writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEH_VE_ADDR));
5868+
5869+	/* DE signal for TTL */
5870+	writel_relaxed(havon_begin, priv->io_base + _REG(L_OEV1_HS_ADDR));
5871+	writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEV1_HE_ADDR));
5872+	writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEV1_VS_ADDR));
5873+	writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEV1_VE_ADDR));
5874+
5875+	/* Hsync signal for TTL */
5876+	if (mode->flags & DRM_MODE_FLAG_PHSYNC) {
5877+		writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HS_ADDR));
5878+		writel_relaxed(hso_begin, priv->io_base + _REG(L_STH1_HE_ADDR));
5879+	} else {
5880+		writel_relaxed(hso_begin, priv->io_base + _REG(L_STH1_HS_ADDR));
5881+		writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HE_ADDR));
5882+	}
5883+	writel_relaxed(0, priv->io_base + _REG(L_STH1_VS_ADDR));
5884+	writel_relaxed(max_lncnt, priv->io_base + _REG(L_STH1_VE_ADDR));
5885+
5886+	/* Vsync signal for TTL */
5887+	writel_relaxed(vso_begin, priv->io_base + _REG(L_STV1_HS_ADDR));
5888+	writel_relaxed(vso_end, priv->io_base + _REG(L_STV1_HE_ADDR));
5889+	if (mode->flags & DRM_MODE_FLAG_PVSYNC) {
5890+		writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VS_ADDR));
5891+		writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VE_ADDR));
5892+	} else {
5893+		writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VS_ADDR));
5894+		writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VE_ADDR));
5895+	}
5896+
5897+	/* DE signal */
5898+	writel_relaxed(havon_begin, priv->io_base + _REG(L_DE_HS_ADDR));
5899+	writel_relaxed(havon_end + 1, priv->io_base + _REG(L_DE_HE_ADDR));
5900+	writel_relaxed(vavon_bline, priv->io_base + _REG(L_DE_VS_ADDR));
5901+	writel_relaxed(vavon_eline, priv->io_base + _REG(L_DE_VE_ADDR));
5902+
5903+	/* Hsync signal */
5904+	writel_relaxed(hso_begin, priv->io_base + _REG(L_HSYNC_HS_ADDR));
5905+	writel_relaxed(hso_end, priv->io_base + _REG(L_HSYNC_HE_ADDR));
5906+	writel_relaxed(0, priv->io_base + _REG(L_HSYNC_VS_ADDR));
5907+	writel_relaxed(max_lncnt, priv->io_base + _REG(L_HSYNC_VE_ADDR));
5908+
5909+	/* Vsync signal */
5910+	writel_relaxed(vso_begin, priv->io_base + _REG(L_VSYNC_HS_ADDR));
5911+	writel_relaxed(vso_end, priv->io_base + _REG(L_VSYNC_HE_ADDR));
5912+	writel_relaxed(vso_bline, priv->io_base + _REG(L_VSYNC_VS_ADDR));
5913+	writel_relaxed(vso_eline, priv->io_base + _REG(L_VSYNC_VE_ADDR));
5914+
5915+	writel_relaxed(0, priv->io_base + _REG(L_INV_CNT_ADDR));
5916+	writel_relaxed(L_TCON_MISC_SEL_STV1 | L_TCON_MISC_SEL_STV2,
5917+		       priv->io_base + _REG(L_TCON_MISC_SEL_ADDR));
5918+
5919+	priv->venc.current_mode = MESON_VENC_MODE_MIPI_DSI;
5920+}
5921+EXPORT_SYMBOL_GPL(meson_venc_mipi_dsi_mode_set);
5922+
5923 void meson_venci_cvbs_mode_set(struct meson_drm *priv,
5924 			       struct meson_cvbs_enci_mode *mode)
5925 {
5926@@ -1747,8 +1948,15 @@
5927
5928 void meson_venc_enable_vsync(struct meson_drm *priv)
5929 {
5930-	writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
5931-		       priv->io_base + _REG(VENC_INTCTRL));
5932+	switch (priv->venc.current_mode) {
5933+	case MESON_VENC_MODE_MIPI_DSI:
5934+		writel_relaxed(VENC_INTCTRL_ENCP_LNRST_INT_EN,
5935+			       priv->io_base + _REG(VENC_INTCTRL));
5936+		break;
5937+	default:
5938+		writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
5939+			       priv->io_base + _REG(VENC_INTCTRL));
5940+	}
5941 	regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
5942 }
5943
5944diff -Naur a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
5945--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c	2022-12-19 17:13:12.677518989 +0800
5946+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c	1970-01-01 08:00:00.000000000 +0800
5947@@ -1,293 +0,0 @@
5948-// SPDX-License-Identifier: GPL-2.0-or-later
5949-/*
5950- * Copyright (C) 2016 BayLibre, SAS
5951- * Author: Neil Armstrong <narmstrong@baylibre.com>
5952- * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5953- * Copyright (C) 2014 Endless Mobile
5954- *
5955- * Written by:
5956- *     Jasper St. Pierre <jstpierre@mecheye.net>
5957- */
5958-
5959-#include <linux/export.h>
5960-#include <linux/of_graph.h>
5961-
5962-#include <drm/drm_atomic_helper.h>
5963-#include <drm/drm_device.h>
5964-#include <drm/drm_edid.h>
5965-#include <drm/drm_probe_helper.h>
5966-#include <drm/drm_print.h>
5967-
5968-#include "meson_registers.h"
5969-#include "meson_vclk.h"
5970-#include "meson_venc_cvbs.h"
5971-
5972-/* HHI VDAC Registers */
5973-#define HHI_VDAC_CNTL0		0x2F4 /* 0xbd offset in data sheet */
5974-#define HHI_VDAC_CNTL0_G12A	0x2EC /* 0xbd offset in data sheet */
5975-#define HHI_VDAC_CNTL1		0x2F8 /* 0xbe offset in data sheet */
5976-#define HHI_VDAC_CNTL1_G12A	0x2F0 /* 0xbe offset in data sheet */
5977-
5978-struct meson_venc_cvbs {
5979-	struct drm_encoder	encoder;
5980-	struct drm_connector	connector;
5981-	struct meson_drm	*priv;
5982-};
5983-#define encoder_to_meson_venc_cvbs(x) \
5984-	container_of(x, struct meson_venc_cvbs, encoder)
5985-
5986-#define connector_to_meson_venc_cvbs(x) \
5987-	container_of(x, struct meson_venc_cvbs, connector)
5988-
5989-/* Supported Modes */
5990-
5991-struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
5992-	{ /* PAL */
5993-		.enci = &meson_cvbs_enci_pal,
5994-		.mode = {
5995-			DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
5996-				 720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
5997-				 DRM_MODE_FLAG_INTERLACE),
5998-			.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
5999-		},
6000-	},
6001-	{ /* NTSC */
6002-		.enci = &meson_cvbs_enci_ntsc,
6003-		.mode = {
6004-			DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
6005-				720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
6006-				DRM_MODE_FLAG_INTERLACE),
6007-			.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
6008-		},
6009-	},
6010-};
6011-
6012-static const struct meson_cvbs_mode *
6013-meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
6014-{
6015-	int i;
6016-
6017-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
6018-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
6019-
6020-		if (drm_mode_match(req_mode, &meson_mode->mode,
6021-				   DRM_MODE_MATCH_TIMINGS |
6022-				   DRM_MODE_MATCH_CLOCK |
6023-				   DRM_MODE_MATCH_FLAGS |
6024-				   DRM_MODE_MATCH_3D_FLAGS))
6025-			return meson_mode;
6026-	}
6027-
6028-	return NULL;
6029-}
6030-
6031-/* Connector */
6032-
6033-static void meson_cvbs_connector_destroy(struct drm_connector *connector)
6034-{
6035-	drm_connector_cleanup(connector);
6036-}
6037-
6038-static enum drm_connector_status
6039-meson_cvbs_connector_detect(struct drm_connector *connector, bool force)
6040-{
6041-	/* FIXME: Add load-detect or jack-detect if possible */
6042-	return connector_status_connected;
6043-}
6044-
6045-static int meson_cvbs_connector_get_modes(struct drm_connector *connector)
6046-{
6047-	struct drm_device *dev = connector->dev;
6048-	struct drm_display_mode *mode;
6049-	int i;
6050-
6051-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
6052-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
6053-
6054-		mode = drm_mode_duplicate(dev, &meson_mode->mode);
6055-		if (!mode) {
6056-			DRM_ERROR("Failed to create a new display mode\n");
6057-			return 0;
6058-		}
6059-
6060-		drm_mode_probed_add(connector, mode);
6061-	}
6062-
6063-	return i;
6064-}
6065-
6066-static int meson_cvbs_connector_mode_valid(struct drm_connector *connector,
6067-					   struct drm_display_mode *mode)
6068-{
6069-	/* Validate the modes added in get_modes */
6070-	return MODE_OK;
6071-}
6072-
6073-static const struct drm_connector_funcs meson_cvbs_connector_funcs = {
6074-	.detect			= meson_cvbs_connector_detect,
6075-	.fill_modes		= drm_helper_probe_single_connector_modes,
6076-	.destroy		= meson_cvbs_connector_destroy,
6077-	.reset			= drm_atomic_helper_connector_reset,
6078-	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
6079-	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
6080-};
6081-
6082-static const
6083-struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = {
6084-	.get_modes	= meson_cvbs_connector_get_modes,
6085-	.mode_valid	= meson_cvbs_connector_mode_valid,
6086-};
6087-
6088-/* Encoder */
6089-
6090-static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder)
6091-{
6092-	drm_encoder_cleanup(encoder);
6093-}
6094-
6095-static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = {
6096-	.destroy        = meson_venc_cvbs_encoder_destroy,
6097-};
6098-
6099-static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
6100-					struct drm_crtc_state *crtc_state,
6101-					struct drm_connector_state *conn_state)
6102-{
6103-	if (meson_cvbs_get_mode(&crtc_state->mode))
6104-		return 0;
6105-
6106-	return -EINVAL;
6107-}
6108-
6109-static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
6110-{
6111-	struct meson_venc_cvbs *meson_venc_cvbs =
6112-					encoder_to_meson_venc_cvbs(encoder);
6113-	struct meson_drm *priv = meson_venc_cvbs->priv;
6114-
6115-	/* Disable CVBS VDAC */
6116-	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
6117-		regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
6118-		regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
6119-	} else {
6120-		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
6121-		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
6122-	}
6123-}
6124-
6125-static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
6126-{
6127-	struct meson_venc_cvbs *meson_venc_cvbs =
6128-					encoder_to_meson_venc_cvbs(encoder);
6129-	struct meson_drm *priv = meson_venc_cvbs->priv;
6130-
6131-	/* VDAC0 source is not from ATV */
6132-	writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
6133-			    priv->io_base + _REG(VENC_VDAC_DACSEL0));
6134-
6135-	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
6136-		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
6137-		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
6138-	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
6139-		 meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
6140-		regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
6141-		regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
6142-	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
6143-		regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
6144-		regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
6145-	}
6146-}
6147-
6148-static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
6149-				   struct drm_display_mode *mode,
6150-				   struct drm_display_mode *adjusted_mode)
6151-{
6152-	const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
6153-	struct meson_venc_cvbs *meson_venc_cvbs =
6154-					encoder_to_meson_venc_cvbs(encoder);
6155-	struct meson_drm *priv = meson_venc_cvbs->priv;
6156-
6157-	if (meson_mode) {
6158-		meson_venci_cvbs_mode_set(priv, meson_mode->enci);
6159-
6160-		/* Setup 27MHz vclk2 for ENCI and VDAC */
6161-		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
6162-				 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
6163-				 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
6164-				 true);
6165-	}
6166-}
6167-
6168-static const struct drm_encoder_helper_funcs
6169-				meson_venc_cvbs_encoder_helper_funcs = {
6170-	.atomic_check	= meson_venc_cvbs_encoder_atomic_check,
6171-	.disable	= meson_venc_cvbs_encoder_disable,
6172-	.enable		= meson_venc_cvbs_encoder_enable,
6173-	.mode_set	= meson_venc_cvbs_encoder_mode_set,
6174-};
6175-
6176-static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
6177-{
6178-	struct device_node *remote;
6179-
6180-	remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
6181-	if (!remote)
6182-		return false;
6183-
6184-	of_node_put(remote);
6185-	return true;
6186-}
6187-
6188-int meson_venc_cvbs_create(struct meson_drm *priv)
6189-{
6190-	struct drm_device *drm = priv->drm;
6191-	struct meson_venc_cvbs *meson_venc_cvbs;
6192-	struct drm_connector *connector;
6193-	struct drm_encoder *encoder;
6194-	int ret;
6195-
6196-	if (!meson_venc_cvbs_connector_is_available(priv)) {
6197-		dev_info(drm->dev, "CVBS Output connector not available\n");
6198-		return 0;
6199-	}
6200-
6201-	meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
6202-				       GFP_KERNEL);
6203-	if (!meson_venc_cvbs)
6204-		return -ENOMEM;
6205-
6206-	meson_venc_cvbs->priv = priv;
6207-	encoder = &meson_venc_cvbs->encoder;
6208-	connector = &meson_venc_cvbs->connector;
6209-
6210-	/* Connector */
6211-
6212-	drm_connector_helper_add(connector,
6213-				 &meson_cvbs_connector_helper_funcs);
6214-
6215-	ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs,
6216-				 DRM_MODE_CONNECTOR_Composite);
6217-	if (ret) {
6218-		dev_err(priv->dev, "Failed to init CVBS connector\n");
6219-		return ret;
6220-	}
6221-
6222-	connector->interlace_allowed = 1;
6223-
6224-	/* Encoder */
6225-
6226-	drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs);
6227-
6228-	ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs,
6229-			       DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs");
6230-	if (ret) {
6231-		dev_err(priv->dev, "Failed to init CVBS encoder\n");
6232-		return ret;
6233-	}
6234-
6235-	encoder->possible_crtcs = BIT(0);
6236-
6237-	drm_connector_attach_encoder(connector, encoder);
6238-
6239-	return 0;
6240-}
6241diff -Naur a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_venc_cvbs.h
6242--- a/drivers/gpu/drm/meson/meson_venc_cvbs.h	2022-12-19 17:13:12.677518989 +0800
6243+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.h	1970-01-01 08:00:00.000000000 +0800
6244@@ -1,29 +0,0 @@
6245-/* SPDX-License-Identifier: GPL-2.0-or-later */
6246-/*
6247- * Copyright (C) 2016 BayLibre, SAS
6248- * Author: Neil Armstrong <narmstrong@baylibre.com>
6249- * Copyright (C) 2014 Endless Mobile
6250- *
6251- * Written by:
6252- *     Jasper St. Pierre <jstpierre@mecheye.net>
6253- */
6254-
6255-#ifndef __MESON_VENC_CVBS_H
6256-#define __MESON_VENC_CVBS_H
6257-
6258-#include "meson_drv.h"
6259-#include "meson_venc.h"
6260-
6261-struct meson_cvbs_mode {
6262-	struct meson_cvbs_enci_mode *enci;
6263-	struct drm_display_mode mode;
6264-};
6265-
6266-#define MESON_CVBS_MODES_COUNT	2
6267-
6268-/* Modes supported by the CVBS output */
6269-extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
6270-
6271-int meson_venc_cvbs_create(struct meson_drm *priv);
6272-
6273-#endif /* __MESON_VENC_CVBS_H */
6274diff -Naur a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
6275--- a/drivers/gpu/drm/meson/meson_venc.h	2022-12-19 17:13:12.677518989 +0800
6276+++ b/drivers/gpu/drm/meson/meson_venc.h	2023-02-23 17:02:04.959751069 +0800
6277@@ -21,6 +21,7 @@
6278 	MESON_VENC_MODE_CVBS_PAL,
6279 	MESON_VENC_MODE_CVBS_NTSC,
6280 	MESON_VENC_MODE_HDMI,
6281+	MESON_VENC_MODE_MIPI_DSI,
6282 };
6283
6284 struct meson_cvbs_enci_mode {
6285@@ -47,6 +48,9 @@
6286 	unsigned int analog_sync_adj;
6287 };
6288
6289+/* LCD Encoder gamma setup */
6290+void meson_encl_load_gamma(struct meson_drm *priv);
6291+
6292 /* HDMI Clock parameters */
6293 enum drm_mode_status
6294 meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
6295@@ -63,6 +67,8 @@
6296 			      unsigned int ycrcb_map,
6297 			      bool yuv420_mode,
6298 			      const struct drm_display_mode *mode);
6299+void meson_venc_mipi_dsi_mode_set(struct meson_drm *priv,
6300+				  const struct drm_display_mode *mode);
6301 unsigned int meson_venci_get_field(struct meson_drm *priv);
6302
6303 void meson_venc_enable_vsync(struct meson_drm *priv);
6304diff -Naur a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
6305--- a/drivers/gpu/drm/meson/meson_viu.c	2022-12-19 17:13:12.677518989 +0800
6306+++ b/drivers/gpu/drm/meson/meson_viu.c	2023-02-23 17:02:04.959751069 +0800
6307@@ -78,32 +78,52 @@
6308 	EOTF_COEFF_RIGHTSHIFT /* right shift */
6309 };
6310
6311-static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
6312-					   int *m, bool csc_on)
6313+static void meson_viu_set_g12a_osd_matrix(struct meson_drm *priv,
6314+					  int *m, bool csc_on)
6315 {
6316 	/* VPP WRAP OSD1 matrix */
6317 	writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
6318 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1));
6319+	writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
6320+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_PRE_OFFSET0_1));
6321 	writel(m[2] & 0xfff,
6322 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2));
6323+	writel(m[2] & 0xfff,
6324+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_PRE_OFFSET2));
6325 	writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
6326 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF00_01));
6327+	writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
6328+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_COEF00_01));
6329 	writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
6330 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF02_10));
6331+	writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
6332+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_COEF02_10));
6333 	writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
6334 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
6335+	writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
6336+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_COEF11_12));
6337 	writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
6338 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
6339-	writel((m[11] & 0x1fff),
6340+	writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
6341+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_COEF20_21));
6342+	writel((m[11] & 0x1fff) << 16,
6343 		priv->io_base +	_REG(VPP_WRAP_OSD1_MATRIX_COEF22));
6344+	writel((m[11] & 0x1fff) << 16,
6345+		priv->io_base +	_REG(VPP_WRAP_OSD2_MATRIX_COEF22));
6346
6347 	writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
6348 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET0_1));
6349+	writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
6350+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_OFFSET0_1));
6351 	writel(m[20] & 0xfff,
6352 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET2));
6353+	writel(m[20] & 0xfff,
6354+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_OFFSET2));
6355
6356 	writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
6357 		priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
6358+	writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
6359+		priv->io_base + _REG(VPP_WRAP_OSD2_MATRIX_EN_CTRL));
6360 }
6361
6362 static void meson_viu_set_osd_matrix(struct meson_drm *priv,
6363@@ -114,21 +134,36 @@
6364 		/* osd matrix, VIU_MATRIX_0 */
6365 		writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
6366 			priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET0_1));
6367+		writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
6368+			priv->io_base + _REG(VIU_OSD2_MATRIX_PRE_OFFSET0_1));
6369 		writel(m[2] & 0xfff,
6370 			priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET2));
6371+		writel(m[2] & 0xfff,
6372+			priv->io_base + _REG(VIU_OSD2_MATRIX_PRE_OFFSET2));
6373 		writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
6374 			priv->io_base + _REG(VIU_OSD1_MATRIX_COEF00_01));
6375+		writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
6376+			priv->io_base + _REG(VIU_OSD2_MATRIX_COEF00_01));
6377 		writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
6378 			priv->io_base + _REG(VIU_OSD1_MATRIX_COEF02_10));
6379+		writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
6380+			priv->io_base + _REG(VIU_OSD2_MATRIX_COEF02_10));
6381 		writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
6382 			priv->io_base + _REG(VIU_OSD1_MATRIX_COEF11_12));
6383+		writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
6384+			priv->io_base + _REG(VIU_OSD2_MATRIX_COEF11_12));
6385 		writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
6386 			priv->io_base + _REG(VIU_OSD1_MATRIX_COEF20_21));
6387+		writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
6388+			priv->io_base + _REG(VIU_OSD2_MATRIX_COEF20_21));
6389
6390 		if (m[21]) {
6391 			writel(((m[11] & 0x1fff) << 16) | (m[12] & 0x1fff),
6392 				priv->io_base +
6393 					_REG(VIU_OSD1_MATRIX_COEF22_30));
6394+			writel(((m[11] & 0x1fff) << 16),
6395+				priv->io_base +
6396+					_REG(VIU_OSD2_MATRIX_COEF22));
6397 			writel(((m[13] & 0x1fff) << 16) | (m[14] & 0x1fff),
6398 				priv->io_base +
6399 					_REG(VIU_OSD1_MATRIX_COEF31_32));
6400@@ -137,14 +172,21 @@
6401 					_REG(VIU_OSD1_MATRIX_COEF40_41));
6402 			writel(m[17] & 0x1fff, priv->io_base +
6403 				_REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
6404-		} else
6405+		} else {
6406 			writel((m[11] & 0x1fff) << 16, priv->io_base +
6407 				_REG(VIU_OSD1_MATRIX_COEF22_30));
6408+			writel((m[11] & 0x1fff) << 16, priv->io_base +
6409+				_REG(VIU_OSD2_MATRIX_COEF22));
6410+		}
6411
6412 		writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
6413 			priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET0_1));
6414+		writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
6415+			priv->io_base + _REG(VIU_OSD2_MATRIX_OFFSET0_1));
6416 		writel(m[20] & 0xfff,
6417 			priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET2));
6418+		writel(m[20] & 0xfff,
6419+			priv->io_base + _REG(VIU_OSD2_MATRIX_OFFSET2));
6420
6421 		writel_bits_relaxed(3 << 30, m[21] << 30,
6422 			priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
6423@@ -154,8 +196,12 @@
6424 		/* 23 reserved for clipping control */
6425 		writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
6426 			priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
6427+		writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
6428+			priv->io_base + _REG(VIU_OSD2_MATRIX_CTRL));
6429 		writel_bits_relaxed(BIT(1), 0,
6430 			priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
6431+		writel_bits_relaxed(BIT(1), 0,
6432+			priv->io_base + _REG(VIU_OSD2_MATRIX_CTRL));
6433 	} else if (m_select == VIU_MATRIX_OSD_EOTF) {
6434 		int i;
6435
6436@@ -426,7 +472,7 @@
6437 	    meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
6438 		meson_viu_load_matrix(priv);
6439 	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
6440-		meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
6441+		meson_viu_set_g12a_osd_matrix(priv, RGB709_to_YUV709l_coeff,
6442 					       true);
6443 		/* fix green/pink color distortion from vendor u-boot */
6444 		writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
6445@@ -469,14 +515,13 @@
6446 			priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
6447
6448 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
6449-		u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
6450-			  (u32)VIU_OSD_BLEND_REORDER(1, 0) |
6451-			  (u32)VIU_OSD_BLEND_REORDER(2, 0) |
6452-			  (u32)VIU_OSD_BLEND_REORDER(3, 0) |
6453-			  (u32)VIU_OSD_BLEND_DIN_EN(1) |
6454-			  (u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
6455-			  (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
6456-			  (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
6457+		/* setup bypass to have OSD1->DOUT0 + OSD2->DOUT1 */
6458+		u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) | /* OSD1 to DIN0 */
6459+			  (u32)VIU_OSD_BLEND_REORDER(1, 4) |
6460+			  (u32)VIU_OSD_BLEND_REORDER(2, 4) |
6461+			  (u32)VIU_OSD_BLEND_REORDER(3, 2) | /* OSD2 to DIN3 */
6462+			  (u32)VIU_OSD_BLEND_DIN_EN(9) | /* Enable DIN0 & DIN3 */
6463+			  (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 | /* DIN0 to DOUT0 */
6464 			  (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
6465 			  (u32)VIU_OSD_BLEND_HOLD_LINES(4);
6466 		writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
6467diff -Naur a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
6468--- a/drivers/gpu/drm/meson/meson_vpp.h	2022-12-19 17:13:12.677518989 +0800
6469+++ b/drivers/gpu/drm/meson/meson_vpp.h	2023-02-23 17:02:04.959751069 +0800
6470@@ -12,6 +12,8 @@
6471 struct drm_rect;
6472 struct meson_drm;
6473
6474+/* Mux VIU/VPP to ENCL */
6475+#define MESON_VIU_VPP_MUX_ENCL	0x0
6476 /* Mux VIU/VPP to ENCI */
6477 #define MESON_VIU_VPP_MUX_ENCI	0x5
6478 /* Mux VIU/VPP to ENCP */
6479diff -Naur a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
6480--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c	2022-12-19 17:13:12.769520092 +0800
6481+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c	2023-02-23 17:02:04.959751069 +0800
6482@@ -81,6 +81,7 @@
6483 }
6484
6485 static struct devfreq_dev_profile panfrost_devfreq_profile = {
6486+	.timer = DEVFREQ_TIMER_DELAYED,
6487 	.polling_ms = 50, /* ~3 frames */
6488 	.target = panfrost_devfreq_target,
6489 	.get_dev_status = panfrost_devfreq_get_dev_status,
6490@@ -134,8 +135,16 @@
6491 	panfrost_devfreq_profile.initial_freq = cur_freq;
6492 	dev_pm_opp_put(opp);
6493
6494+	/*
6495+	 * Setup default thresholds for the simple_ondemand governor.
6496+	 * The values are chosen based on experiments.
6497+	 */
6498+	pfdevfreq->gov_data.upthreshold = 45;
6499+	pfdevfreq->gov_data.downdifferential = 5;
6500+
6501 	devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
6502-					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
6503+					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
6504+					  &pfdevfreq->gov_data);
6505 	if (IS_ERR(devfreq)) {
6506 		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
6507 		ret = PTR_ERR(devfreq);
6508diff -Naur a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
6509--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h	2022-12-19 17:13:12.769520092 +0800
6510+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h	2023-02-23 17:02:04.959751069 +0800
6511@@ -4,6 +4,7 @@
6512 #ifndef __PANFROST_DEVFREQ_H__
6513 #define __PANFROST_DEVFREQ_H__
6514
6515+#include <linux/devfreq.h>
6516 #include <linux/spinlock.h>
6517 #include <linux/ktime.h>
6518
6519@@ -17,6 +18,7 @@
6520 	struct devfreq *devfreq;
6521 	struct opp_table *regulators_opp_table;
6522 	struct thermal_cooling_device *cooling;
6523+	struct devfreq_simple_ondemand_data gov_data;
6524 	bool opp_of_table_added;
6525
6526 	ktime_t busy_time;
6527diff -Naur a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
6528--- a/drivers/gpu/drm/panfrost/panfrost_job.c	2022-12-19 17:13:12.769520092 +0800
6529+++ b/drivers/gpu/drm/panfrost/panfrost_job.c	2023-02-23 17:02:04.959751069 +0800
6530@@ -157,7 +157,7 @@
6531
6532 	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
6533
6534-	ret = pm_runtime_get_sync(pfdev->dev);
6535+	ret = pm_runtime_resume_and_get(pfdev->dev);
6536 	if (ret < 0)
6537 		return;
6538
6539diff -Naur a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
6540--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c	2022-12-19 17:13:12.769520092 +0800
6541+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c	2023-02-23 17:02:04.959751069 +0800
6542@@ -632,22 +632,20 @@
6543 {
6544 	struct panfrost_device *pfdev = data;
6545 	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
6546-	int i, ret;
6547+	int ret;
6548
6549-	for (i = 0; status; i++) {
6550-		u32 mask = BIT(i) | BIT(i + 16);
6551+	while (status) {
6552+		u32 as = ffs(status | (status >> 16)) - 1;
6553+		u32 mask = BIT(as) | BIT(as + 16);
6554 		u64 addr;
6555 		u32 fault_status;
6556 		u32 exception_type;
6557 		u32 access_type;
6558 		u32 source_id;
6559
6560-		if (!(status & mask))
6561-			continue;
6562-
6563-		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
6564-		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
6565-		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
6566+		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
6567+		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
6568+		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
6569
6570 		/* decode the fault status */
6571 		exception_type = fault_status & 0xFF;
6572@@ -658,8 +656,8 @@
6573
6574 		/* Page fault only */
6575 		ret = -1;
6576-		if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
6577-			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
6578+		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
6579+			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
6580
6581 		if (ret)
6582 			/* terminal fault, print info about the fault */
6583@@ -671,7 +669,7 @@
6584 				"exception type 0x%X: %s\n"
6585 				"access type 0x%X: %s\n"
6586 				"source id 0x%X\n",
6587-				i, addr,
6588+				as, addr,
6589 				"TODO",
6590 				fault_status,
6591 				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
6592@@ -680,6 +678,10 @@
6593 				source_id);
6594
6595 		status &= ~mask;
6596+
6597+		/* If we received new MMU interrupts, process them before returning. */
6598+		if (!status)
6599+			status = mmu_read(pfdev, MMU_INT_RAWSTAT);
6600 	}
6601
6602 	mmu_write(pfdev, MMU_INT_MASK, ~0);
6603diff -Naur a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
6604--- a/drivers/i2c/muxes/i2c-mux-pca954x.c	2022-12-19 17:13:12.917521866 +0800
6605+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c	2023-02-23 17:02:04.959751069 +0800
6606@@ -401,7 +401,7 @@
6607 	else
6608 		data->last_chan = 0; /* Disconnect multiplexer */
6609
6610-	ret = i2c_smbus_write_byte(client, data->last_chan);
6611+	ret = i2c_smbus_write_byte(client, 15);
6612 	if (ret < 0)
6613 		data->last_chan = 0;
6614
6615diff -Naur a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
6616--- a/drivers/input/touchscreen/edt-ft5x06.c	2022-12-19 17:13:13.073523735 +0800
6617+++ b/drivers/input/touchscreen/edt-ft5x06.c	2023-02-23 17:02:04.959751069 +0800
6618@@ -69,6 +69,11 @@
6619 #define EDT_RAW_DATA_RETRIES		100
6620 #define EDT_RAW_DATA_DELAY		1000 /* usec */
6621
6622+#define EDT_DEFAULT_NUM_X		800
6623+#define EDT_DEFAULT_NUM_Y		480
6624+
6625+#define POLL_INTERVAL_MS		17	/* 17ms = 60fps */
6626+
6627 enum edt_pmode {
6628 	EDT_PMODE_NOT_SUPPORTED,
6629 	EDT_PMODE_HIBERNATE,
6630@@ -121,11 +126,15 @@
6631 	int offset_y;
6632 	int report_rate;
6633 	int max_support_points;
6634+	unsigned int known_ids;
6635
6636 	char name[EDT_NAME_LEN];
6637
6638 	struct edt_reg_addr reg_addr;
6639 	enum edt_ver version;
6640+
6641+	struct timer_list timer;
6642+	struct work_struct work_i2c_poll;
6643 };
6644
6645 struct edt_i2c_chip_data {
6646@@ -192,6 +201,10 @@
6647 	int i, type, x, y, id;
6648 	int offset, tplen, datalen, crclen;
6649 	int error;
6650+	unsigned int active_ids = 0, known_ids = tsdata->known_ids;
6651+	long released_ids;
6652+	int b = 0;
6653+	unsigned int num_points;
6654
6655 	switch (tsdata->version) {
6656 	case EDT_M06:
6657@@ -239,9 +252,15 @@
6658
6659 		if (!edt_ft5x06_ts_check_crc(tsdata, rdbuf, datalen))
6660 			goto out;
6661+		num_points = tsdata->max_support_points;
6662+	} else {
6663+		/* Register 2 is TD_STATUS, containing the number of touch
6664+		 * points.
6665+		 */
6666+		num_points = min(rdbuf[2] & 0xf, tsdata->max_support_points);
6667 	}
6668
6669-	for (i = 0; i < tsdata->max_support_points; i++) {
6670+	for (i = 0; i < num_points; i++) {
6671 		u8 *buf = &rdbuf[i * tplen + offset];
6672
6673 		type = buf[0] >> 6;
6674@@ -263,10 +282,25 @@
6675
6676 		input_mt_slot(tsdata->input, id);
6677 		if (input_mt_report_slot_state(tsdata->input, MT_TOOL_FINGER,
6678-					       type != TOUCH_EVENT_UP))
6679+					       type != TOUCH_EVENT_UP)) {
6680 			touchscreen_report_pos(tsdata->input, &tsdata->prop,
6681 					       x, y, true);
6682+			active_ids |= BIT(id);
6683+		} else {
6684+			known_ids &= ~BIT(id);
6685+		}
6686+	}
6687+
6688+	/* One issue with the device is that the TOUCH_UP message is not always
6689+	 * returned. Instead, track which ids we know about and report when they
6690+	 * are no longer updated.
6691+	 */
6692+	released_ids = known_ids & ~active_ids;
6693+	for_each_set_bit_from(b, &released_ids, tsdata->max_support_points) {
6694+		input_mt_slot(tsdata->input, b);
6695+		input_mt_report_slot_inactive(tsdata->input);
6696 	}
6697+	tsdata->known_ids = active_ids;
6698
6699 	input_mt_report_pointer_emulation(tsdata->input, true);
6700 	input_sync(tsdata->input);
6701@@ -275,6 +309,22 @@
6702 	return IRQ_HANDLED;
6703 }
6704
6705+static void edt_ft5x06_ts_irq_poll_timer(struct timer_list *t)
6706+{
6707+	struct edt_ft5x06_ts_data *tsdata = from_timer(tsdata, t, timer);
6708+
6709+	schedule_work(&tsdata->work_i2c_poll);
6710+	mod_timer(&tsdata->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
6711+}
6712+
6713+static void edt_ft5x06_ts_work_i2c_poll(struct work_struct *work)
6714+{
6715+	struct edt_ft5x06_ts_data *tsdata = container_of(work,
6716+			struct edt_ft5x06_ts_data, work_i2c_poll);
6717+
6718+	edt_ft5x06_ts_isr(0, tsdata);
6719+}
6720+
6721 static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
6722 				     u8 addr, u8 value)
6723 {
6724@@ -895,6 +945,7 @@
6725 		 * the identification registers.
6726 		 */
6727 		switch (rdbuf[0]) {
6728+		case 0x11:   /* EDT EP0110M09 */
6729 		case 0x35:   /* EDT EP0350M09 */
6730 		case 0x43:   /* EDT EP0430M09 */
6731 		case 0x50:   /* EDT EP0500M09 */
6732@@ -997,17 +1048,14 @@
6733 	if (reg_addr->reg_report_rate != NO_REGISTER)
6734 		tsdata->report_rate = edt_ft5x06_register_read(tsdata,
6735 						reg_addr->reg_report_rate);
6736-	if (tsdata->version == EDT_M06 ||
6737-	    tsdata->version == EDT_M09 ||
6738-	    tsdata->version == EDT_M12) {
6739+	tsdata->num_x = EDT_DEFAULT_NUM_X;
6740+	if (reg_addr->reg_num_x != NO_REGISTER)
6741 		tsdata->num_x = edt_ft5x06_register_read(tsdata,
6742 							 reg_addr->reg_num_x);
6743+	tsdata->num_y = EDT_DEFAULT_NUM_Y;
6744+	if (reg_addr->reg_num_y != NO_REGISTER)
6745 		tsdata->num_y = edt_ft5x06_register_read(tsdata,
6746 							 reg_addr->reg_num_y);
6747-	} else {
6748-		tsdata->num_x = -1;
6749-		tsdata->num_y = -1;
6750-	}
6751 }
6752
6753 static void
6754@@ -1053,10 +1101,13 @@
6755 	case GENERIC_FT:
6756 		/* this is a guesswork */
6757 		reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
6758+		reg_addr->reg_report_rate = NO_REGISTER;
6759 		reg_addr->reg_gain = M09_REGISTER_GAIN;
6760 		reg_addr->reg_offset = M09_REGISTER_OFFSET;
6761 		reg_addr->reg_offset_x = NO_REGISTER;
6762 		reg_addr->reg_offset_y = NO_REGISTER;
6763+		reg_addr->reg_num_x = NO_REGISTER;
6764+		reg_addr->reg_num_y = NO_REGISTER;
6765 		break;
6766 	}
6767 }
6768@@ -1187,7 +1238,7 @@
6769 	edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
6770 	edt_ft5x06_ts_get_parameters(tsdata);
6771
6772-	dev_dbg(&client->dev,
6773+	dev_info(&client->dev,
6774 		"Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
6775 		tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
6776
6777@@ -1195,20 +1246,10 @@
6778 	input->id.bustype = BUS_I2C;
6779 	input->dev.parent = &client->dev;
6780
6781-	if (tsdata->version == EDT_M06 ||
6782-	    tsdata->version == EDT_M09 ||
6783-	    tsdata->version == EDT_M12) {
6784-		input_set_abs_params(input, ABS_MT_POSITION_X,
6785-				     0, tsdata->num_x * 64 - 1, 0, 0);
6786-		input_set_abs_params(input, ABS_MT_POSITION_Y,
6787-				     0, tsdata->num_y * 64 - 1, 0, 0);
6788-	} else {
6789-		/* Unknown maximum values. Specify via devicetree */
6790-		input_set_abs_params(input, ABS_MT_POSITION_X,
6791-				     0, 65535, 0, 0);
6792-		input_set_abs_params(input, ABS_MT_POSITION_Y,
6793-				     0, 65535, 0, 0);
6794-	}
6795+	input_set_abs_params(input, ABS_MT_POSITION_X,
6796+			     0, tsdata->num_x * 64 - 1, 0, 0);
6797+	input_set_abs_params(input, ABS_MT_POSITION_Y,
6798+			     0, tsdata->num_y * 64 - 1, 0, 0);
6799
6800 	touchscreen_parse_properties(input, true, &tsdata->prop);
6801
6802@@ -1221,17 +1262,27 @@
6803
6804 	i2c_set_clientdata(client, tsdata);
6805
6806-	irq_flags = irq_get_trigger_type(client->irq);
6807-	if (irq_flags == IRQF_TRIGGER_NONE)
6808-		irq_flags = IRQF_TRIGGER_FALLING;
6809-	irq_flags |= IRQF_ONESHOT;
6810-
6811-	error = devm_request_threaded_irq(&client->dev, client->irq,
6812-					NULL, edt_ft5x06_ts_isr, irq_flags,
6813-					client->name, tsdata);
6814-	if (error) {
6815-		dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
6816-		return error;
6817+	if (client->irq) {
6818+		irq_flags = irq_get_trigger_type(client->irq);
6819+		if (irq_flags == IRQF_TRIGGER_NONE)
6820+			irq_flags = IRQF_TRIGGER_FALLING;
6821+		irq_flags |= IRQF_ONESHOT;
6822+
6823+		error = devm_request_threaded_irq(&client->dev, client->irq,
6824+						  NULL, edt_ft5x06_ts_isr,
6825+						  irq_flags, client->name,
6826+						  tsdata);
6827+		if (error) {
6828+			dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
6829+			return error;
6830+		}
6831+	} else {
6832+		INIT_WORK(&tsdata->work_i2c_poll,
6833+			  edt_ft5x06_ts_work_i2c_poll);
6834+		timer_setup(&tsdata->timer, edt_ft5x06_ts_irq_poll_timer, 0);
6835+		tsdata->timer.expires = jiffies +
6836+					msecs_to_jiffies(POLL_INTERVAL_MS);
6837+		add_timer(&tsdata->timer);
6838 	}
6839
6840 	error = devm_device_add_group(&client->dev, &edt_ft5x06_attr_group);
6841@@ -1257,6 +1308,10 @@
6842 {
6843 	struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
6844
6845+	if (!client->irq) {
6846+		del_timer(&tsdata->timer);
6847+		cancel_work_sync(&tsdata->work_i2c_poll);
6848+	}
6849 	edt_ft5x06_ts_teardown_debugfs(tsdata);
6850
6851 	return 0;
6852diff -Naur a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
6853--- a/drivers/irqchip/irq-meson-gpio.c	2022-12-19 17:13:13.097524023 +0800
6854+++ b/drivers/irqchip/irq-meson-gpio.c	2023-02-23 17:02:04.963751125 +0800
6855@@ -15,6 +15,7 @@
6856 #include <linux/irqchip.h>
6857 #include <linux/of.h>
6858 #include <linux/of_address.h>
6859+#include <linux/of_irq.h>
6860
6861 #define NUM_CHANNEL 8
6862 #define MAX_INPUT_MUX 256
6863@@ -136,6 +137,7 @@
6864 struct meson_gpio_irq_controller {
6865 	const struct meson_gpio_irq_params *params;
6866 	void __iomem *base;
6867+	struct irq_domain *domain;
6868 	u32 channel_irqs[NUM_CHANNEL];
6869 	DECLARE_BITMAP(channel_map, NUM_CHANNEL);
6870 	spinlock_t lock;
6871@@ -436,8 +438,8 @@
6872 	.translate	= meson_gpio_irq_domain_translate,
6873 };
6874
6875-static int __init meson_gpio_irq_parse_dt(struct device_node *node,
6876-					  struct meson_gpio_irq_controller *ctl)
6877+static int meson_gpio_irq_parse_dt(struct device_node *node,
6878+				   struct meson_gpio_irq_controller *ctl)
6879 {
6880 	const struct of_device_id *match;
6881 	int ret;
6882@@ -463,63 +465,84 @@
6883 	return 0;
6884 }
6885
6886-static int __init meson_gpio_irq_of_init(struct device_node *node,
6887-					 struct device_node *parent)
6888+static int meson_gpio_intc_probe(struct platform_device *pdev)
6889 {
6890-	struct irq_domain *domain, *parent_domain;
6891+	struct device_node *node = pdev->dev.of_node, *parent;
6892 	struct meson_gpio_irq_controller *ctl;
6893+	struct irq_domain *parent_domain;
6894+	struct resource *res;
6895 	int ret;
6896
6897+	parent = of_irq_find_parent(node);
6898 	if (!parent) {
6899-		pr_err("missing parent interrupt node\n");
6900+		dev_err(&pdev->dev, "missing parent interrupt node\n");
6901 		return -ENODEV;
6902 	}
6903
6904 	parent_domain = irq_find_host(parent);
6905 	if (!parent_domain) {
6906-		pr_err("unable to obtain parent domain\n");
6907+		dev_err(&pdev->dev, "unable to obtain parent domain\n");
6908 		return -ENXIO;
6909 	}
6910
6911-	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
6912+	ctl = devm_kzalloc(&pdev->dev, sizeof(*ctl), GFP_KERNEL);
6913 	if (!ctl)
6914 		return -ENOMEM;
6915
6916 	spin_lock_init(&ctl->lock);
6917
6918-	ctl->base = of_iomap(node, 0);
6919-	if (!ctl->base) {
6920-		ret = -ENOMEM;
6921-		goto free_ctl;
6922-	}
6923+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6924+	ctl->base = devm_ioremap_resource(&pdev->dev, res);
6925+	if (IS_ERR(ctl->base))
6926+		return PTR_ERR(ctl->base);
6927
6928 	ret = meson_gpio_irq_parse_dt(node, ctl);
6929 	if (ret)
6930-		goto free_channel_irqs;
6931+		return ret;
6932
6933-	domain = irq_domain_create_hierarchy(parent_domain, 0,
6934-					     ctl->params->nr_hwirq,
6935-					     of_node_to_fwnode(node),
6936-					     &meson_gpio_irq_domain_ops,
6937-					     ctl);
6938-	if (!domain) {
6939-		pr_err("failed to add domain\n");
6940-		ret = -ENODEV;
6941-		goto free_channel_irqs;
6942+	ctl->domain = irq_domain_create_hierarchy(parent_domain, 0,
6943+						  ctl->params->nr_hwirq,
6944+						  of_node_to_fwnode(node),
6945+						  &meson_gpio_irq_domain_ops,
6946+						  ctl);
6947+	if (!ctl->domain) {
6948+		dev_err(&pdev->dev, "failed to add domain\n");
6949+		return -ENODEV;
6950 	}
6951
6952-	pr_info("%d to %d gpio interrupt mux initialized\n",
6953-		ctl->params->nr_hwirq, NUM_CHANNEL);
6954+	platform_set_drvdata(pdev, ctl);
6955+
6956+	dev_info(&pdev->dev, "%d to %d gpio interrupt mux initialized\n",
6957+		 ctl->params->nr_hwirq, NUM_CHANNEL);
6958
6959 	return 0;
6960+}
6961
6962-free_channel_irqs:
6963-	iounmap(ctl->base);
6964-free_ctl:
6965-	kfree(ctl);
6966+static int meson_gpio_intc_remove(struct platform_device *pdev)
6967+{
6968+	struct meson_gpio_irq_controller *ctl = platform_get_drvdata(pdev);
6969
6970-	return ret;
6971+	irq_domain_remove(ctl->domain);
6972+
6973+	return 0;
6974 }
6975
6976-IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
6977-		meson_gpio_irq_of_init);
6978+static const struct of_device_id meson_gpio_intc_of_match[] = {
6979+	{ .compatible = "amlogic,meson-gpio-intc", },
6980+	{},
6981+};
6982+MODULE_DEVICE_TABLE(of, meson_gpio_intc_of_match);
6983+
6984+static struct platform_driver meson_gpio_intc_driver = {
6985+	.probe  = meson_gpio_intc_probe,
6986+	.remove = meson_gpio_intc_remove,
6987+	.driver = {
6988+		.name = "meson-gpio-intc",
6989+		.of_match_table = meson_gpio_intc_of_match,
6990+	},
6991+};
6992+module_platform_driver(meson_gpio_intc_driver);
6993+
6994+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
6995+MODULE_LICENSE("GPL v2");
6996+MODULE_ALIAS("platform:meson-gpio-intc");
6997diff -Naur a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
6998--- a/drivers/mmc/core/block.c	2022-12-19 17:13:13.441528145 +0800
6999+++ b/drivers/mmc/core/block.c	2023-02-23 17:02:04.963751125 +0800
7000@@ -44,7 +44,9 @@
7001 #include <linux/mmc/host.h>
7002 #include <linux/mmc/mmc.h>
7003 #include <linux/mmc/sd.h>
7004-
7005+#ifdef CONFIG_MMC_MESON_GX
7006+#include <linux/mmc/emmc_partitions.h>
7007+#endif
7008 #include <linux/uaccess.h>
7009
7010 #include "queue.h"
7011@@ -2896,6 +2898,9 @@
7012 {
7013 	struct mmc_blk_data *md, *part_md;
7014 	char cap_str[10];
7015+#ifdef CONFIG_MMC_MESON_GX
7016+	int idx = 0;
7017+#endif
7018
7019 	/*
7020 	 * Check that the card supports the command class(es) we need.
7021@@ -2930,9 +2935,17 @@
7022 	if (mmc_add_disk(md))
7023 		goto out;
7024
7025+#ifdef CONFIG_MMC_MESON_GX
7026+	aml_emmc_partition_ops(card, md->disk);
7027+#endif
7028+
7029 	list_for_each_entry(part_md, &md->part, part) {
7030 		if (mmc_add_disk(part_md))
7031 			goto out;
7032+#ifdef CONFIG_MMC_MESON_GX
7033+		if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT)
7034+			add_fake_boot_partition(part_md->disk, "bootloader%d", idx++);
7035+#endif
7036 	}
7037
7038 	/* Add two debugfs entries */
7039diff -Naur a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
7040--- a/drivers/mmc/host/meson-gx-mmc.c	2022-12-19 17:13:13.449528241 +0800
7041+++ b/drivers/mmc/host/meson-gx-mmc.c	2023-02-23 17:02:04.963751125 +0800
7042@@ -27,6 +27,7 @@
7043 #include <linux/interrupt.h>
7044 #include <linux/bitfield.h>
7045 #include <linux/pinctrl/consumer.h>
7046+#include "../../mmc/core/core.h"
7047
7048 #define DRIVER_NAME "meson-gx-mmc"
7049
7050@@ -38,6 +39,7 @@
7051 #define   CLK_RX_PHASE_MASK GENMASK(13, 12)
7052 #define   CLK_PHASE_0 0
7053 #define   CLK_PHASE_180 2
7054+#define   CLK_PHASE_270 3
7055 #define   CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
7056 #define   CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
7057 #define   CLK_V2_ALWAYS_ON BIT(24)
7058@@ -136,6 +138,7 @@
7059 	unsigned int rx_delay_mask;
7060 	unsigned int always_on;
7061 	unsigned int adjust;
7062+	unsigned int clk_core_phase;
7063 };
7064
7065 struct sd_emmc_desc {
7066@@ -417,7 +420,7 @@
7067 	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
7068 	clk_reg = CLK_ALWAYS_ON(host);
7069 	clk_reg |= CLK_DIV_MASK;
7070-	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
7071+	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->data->clk_core_phase);
7072 	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
7073 	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
7074 	writel(clk_reg, host->regs + SD_EMMC_CLOCK);
7075@@ -1121,6 +1124,43 @@
7076 	.start_signal_voltage_switch = meson_mmc_voltage_switch,
7077 };
7078
7079+struct mmc_host *sdio_host;
7080+
7081+static void sdio_rescan(struct mmc_host *mmc)
7082+{
7083+	int ret;
7084+
7085+	mmc->rescan_entered = 0;
7086+
7087+	/*mmc->host_rescan_disable = false;*/
7088+	mmc_detect_change(mmc, 0);
7089+
7090+	/* start the delayed_work */
7091+	ret = flush_work(&(mmc->detect.work));
7092+	if (!ret)
7093+		pr_info("Error: sdio_rescan() already idle!\n");
7094+}
7095+
7096+void sdio_reinit(void)
7097+{
7098+	mmc_get_card(sdio_host->card, NULL);
7099+	if (sdio_host) {
7100+		if (sdio_host->card) {
7101+			pr_info("[%s] sdio hw_reset\n", __func__);
7102+			sdio_host->bus_ops->hw_reset(sdio_host);
7103+		}
7104+		else {
7105+			sdio_rescan(sdio_host);
7106+		}
7107+	} else {
7108+		pr_info("Error: sdio_host is NULL\n");
7109+	}
7110+	mmc_put_card(sdio_host->card, NULL);
7111+
7112+	pr_info("[%s] finish\n", __func__);
7113+}
7114+EXPORT_SYMBOL(sdio_reinit);
7115+
7116 static int meson_mmc_probe(struct platform_device *pdev)
7117 {
7118 	struct resource *res;
7119@@ -1279,6 +1319,11 @@
7120 		goto err_bounce_buf;
7121 	}
7122
7123+	/*sdio*/
7124+	if (mmc->pm_caps & MMC_PM_KEEP_POWER) {
7125+		sdio_host = mmc;
7126+	}
7127+
7128 	mmc->ops = &meson_mmc_ops;
7129 	mmc_add_host(mmc);
7130
7131@@ -1328,6 +1373,7 @@
7132 	.rx_delay_mask	= CLK_V2_RX_DELAY_MASK,
7133 	.always_on	= CLK_V2_ALWAYS_ON,
7134 	.adjust		= SD_EMMC_ADJUST,
7135+	.clk_core_phase	= CLK_PHASE_180,
7136 };
7137
7138 static const struct meson_mmc_data meson_axg_data = {
7139@@ -1335,6 +1381,7 @@
7140 	.rx_delay_mask	= CLK_V3_RX_DELAY_MASK,
7141 	.always_on	= CLK_V3_ALWAYS_ON,
7142 	.adjust		= SD_EMMC_V3_ADJUST,
7143+	.clk_core_phase	= CLK_PHASE_270,
7144 };
7145
7146 static const struct of_device_id meson_mmc_of_match[] = {
7147diff -Naur a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
7148--- a/drivers/pinctrl/meson/pinctrl-meson.c	2022-12-19 17:13:14.201537253 +0800
7149+++ b/drivers/pinctrl/meson/pinctrl-meson.c	2023-02-23 17:02:04.963751125 +0800
7150@@ -56,6 +56,24 @@
7151 #include "../pinctrl-utils.h"
7152 #include "pinctrl-meson.h"
7153
7154+static int meson_memory_duplicate(struct platform_device *pdev, void **addr,
7155+				  size_t n, size_t size)
7156+{
7157+	void *mem;
7158+
7159+	if (!(*addr))
7160+		return -EINVAL;
7161+
7162+	mem = devm_kzalloc(&pdev->dev, size * n, GFP_KERNEL);
7163+	if (!mem)
7164+		return -ENOMEM;
7165+
7166+	memcpy(mem, *addr, size * n);
7167+	*addr = mem;
7168+
7169+	return 0;
7170+};
7171+
7172 static const unsigned int meson_bit_strides[] = {
7173 	1, 1, 1, 1, 1, 2, 1
7174 };
7175@@ -152,6 +170,7 @@
7176
7177 	return pc->data->num_funcs;
7178 }
7179+EXPORT_SYMBOL(meson_pmx_get_funcs_count);
7180
7181 const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
7182 				    unsigned selector)
7183@@ -160,6 +179,7 @@
7184
7185 	return pc->data->funcs[selector].name;
7186 }
7187+EXPORT_SYMBOL(meson_pmx_get_func_name);
7188
7189 int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
7190 			 const char * const **groups,
7191@@ -172,6 +192,7 @@
7192
7193 	return 0;
7194 }
7195+EXPORT_SYMBOL(meson_pmx_get_groups);
7196
7197 static int meson_pinconf_set_gpio_bit(struct meson_pinctrl *pc,
7198 				      unsigned int pin,
7199@@ -248,11 +269,11 @@
7200 {
7201 	int ret;
7202
7203-	ret = meson_pinconf_set_output(pc, pin, true);
7204+	ret = meson_pinconf_set_drive(pc, pin, high);
7205 	if (ret)
7206 		return ret;
7207
7208-	return meson_pinconf_set_drive(pc, pin, high);
7209+	return meson_pinconf_set_output(pc, pin, true);
7210 }
7211
7212 static int meson_pinconf_disable_bias(struct meson_pinctrl *pc,
7213@@ -355,6 +376,7 @@
7214
7215 		switch (param) {
7216 		case PIN_CONFIG_DRIVE_STRENGTH_UA:
7217+		case PIN_CONFIG_INPUT_ENABLE:
7218 		case PIN_CONFIG_OUTPUT_ENABLE:
7219 		case PIN_CONFIG_OUTPUT:
7220 			arg = pinconf_to_config_argument(configs[i]);
7221@@ -383,6 +405,9 @@
7222 		case PIN_CONFIG_OUTPUT:
7223 			ret = meson_pinconf_set_output_drive(pc, pin, arg);
7224 			break;
7225+		case PIN_CONFIG_INPUT_ENABLE:
7226+			ret = meson_pinconf_set_output(pc, pin, !arg);
7227+			break;
7228 		default:
7229 			ret = -ENOTSUPP;
7230 		}
7231@@ -598,9 +623,42 @@
7232 	return !!(val & BIT(bit));
7233 }
7234
7235+static int meson_gpio_to_irq(struct gpio_chip *chip, unsigned int gpio)
7236+{
7237+	struct meson_pinctrl *pc = gpiochip_get_data(chip);
7238+	struct meson_bank *bank;
7239+	struct irq_fwspec fwspec;
7240+	int hwirq;
7241+
7242+	if (meson_get_bank(pc, gpio, &bank))
7243+		return -EINVAL;
7244+
7245+	if (bank->irq_first < 0) {
7246+		dev_warn(pc->dev, "no support irq for pin[%d]\n", gpio);
7247+		return -EINVAL;
7248+	}
7249+
7250+	if (!pc->of_irq) {
7251+		dev_err(pc->dev, "invalid device node of gpio INTC\n");
7252+		return -EINVAL;
7253+	}
7254+
7255+	hwirq = gpio - bank->first + bank->irq_first;
7256+
7257+	fwspec.fwnode = of_node_to_fwnode(pc->of_irq);
7258+	fwspec.param_count = 2;
7259+	fwspec.param[0] = hwirq;
7260+	fwspec.param[1] = IRQ_TYPE_NONE;
7261+
7262+	return irq_create_fwspec_mapping(&fwspec);
7263+}
7264+
7265 static int meson_gpiolib_register(struct meson_pinctrl *pc)
7266 {
7267 	int ret;
7268+	const char **names;
7269+	const struct pinctrl_pin_desc *pins;
7270+	int i;
7271
7272 	pc->chip.label = pc->data->name;
7273 	pc->chip.parent = pc->dev;
7274@@ -612,6 +670,13 @@
7275 	pc->chip.direction_output = meson_gpio_direction_output;
7276 	pc->chip.get = meson_gpio_get;
7277 	pc->chip.set = meson_gpio_set;
7278+	pc->chip.to_irq = meson_gpio_to_irq;
7279+	pc->chip.set_config = gpiochip_generic_config;
7280+	names = kcalloc(pc->desc.npins, sizeof(char *), GFP_KERNEL);
7281+	pins = pc->desc.pins;
7282+	for (i = 0; i < pc->desc.npins; i++)
7283+		names[pins[i].number] = pins[i].name;
7284+	pc->chip.names = (const char * const *)names;
7285 	pc->chip.base = -1;
7286 	pc->chip.ngpio = pc->data->num_pins;
7287 	pc->chip.can_sleep = false;
7288@@ -619,6 +684,16 @@
7289 	pc->chip.of_gpio_n_cells = 2;
7290
7291 	ret = gpiochip_add_data(&pc->chip, pc);
7292+
7293+	/* pc->chip.names will be assigned to each gpio descriptor's name
7294+	 * member after gpiochip_add_data. To keep node name consistency when
7295+	 * using sysfs to export a gpio, pc->chip.names needs to be cleared;
7296+	 * see gpiod_export->device_create_with_groups.
7297+	 */
7298+	kfree(names);
7299+	names = NULL;
7300+	pc->chip.names = NULL;
7301+
7302 	if (ret) {
7303 		dev_err(pc->dev, "can't add gpio chip %s\n",
7304 			pc->data->name);
7305@@ -681,6 +756,13 @@
7306 	}
7307
7308 	pc->of_node = gpio_np;
7309+	pc->of_irq = of_find_compatible_node(NULL,
7310+					     NULL,
7311+					     "amlogic,meson-gpio-intc-ext");
7312+	if (!pc->of_irq)
7313+		pc->of_irq = of_find_compatible_node(NULL,
7314+						     NULL,
7315+						     "amlogic,meson-gpio-intc");
7316
7317 	pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
7318 	if (IS_ERR_OR_NULL(pc->reg_mux)) {
7319@@ -750,6 +832,16 @@
7320 	if (ret)
7321 		return ret;
7322
7323+	ret = meson_memory_duplicate(pdev, (void **)&pc->data->groups, pc->data->num_groups,
7324+				     sizeof(struct meson_pmx_group));
7325+	if (ret)
7326+		return ret;
7327+
7328+	ret = meson_memory_duplicate(pdev, (void **)&pc->data->funcs, pc->data->num_funcs,
7329+				     sizeof(struct meson_pmx_func));
7330+	if (ret)
7331+		return ret;
7332+
7333 	pc->desc.name		= "pinctrl-meson";
7334 	pc->desc.owner		= THIS_MODULE;
7335 	pc->desc.pctlops	= &meson_pctrl_ops;
7336@@ -766,3 +858,4 @@
7337
7338 	return meson_gpiolib_register(pc);
7339 }
7340+EXPORT_SYMBOL(meson_pinctrl_probe);
7341diff -Naur a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
7342--- a/drivers/pinctrl/meson/pinctrl-meson.h	2022-12-19 17:13:14.201537253 +0800
7343+++ b/drivers/pinctrl/meson/pinctrl-meson.h	2023-02-23 17:02:04.963751125 +0800
7344@@ -130,6 +130,7 @@
7345 	struct regmap *reg_gpio;
7346 	struct regmap *reg_ds;
7347 	struct gpio_chip chip;
7348+	struct device_node *of_irq;
7349 	struct device_node *of_node;
7350 };
7351
7352diff -Naur a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
7353--- a/drivers/soc/amlogic/meson-canvas.c	2022-12-19 17:13:14.497540799 +0800
7354+++ b/drivers/soc/amlogic/meson-canvas.c	2023-02-23 17:02:04.963751125 +0800
7355@@ -148,6 +148,24 @@
7356 }
7357 EXPORT_SYMBOL_GPL(meson_canvas_alloc);
7358
7359+int meson_canvas_alloc_spec(struct meson_canvas *canvas, u8 canvas_index)
7360+{
7361+	unsigned long flags;
7362+
7363+	spin_lock_irqsave(&canvas->lock, flags);
7364+	if (!canvas->used[canvas_index]) {
7365+		canvas->used[canvas_index] = 1;
7366+		spin_unlock_irqrestore(&canvas->lock, flags);
7367+		return 0;
7368+	}
7369+	spin_unlock_irqrestore(&canvas->lock, flags);
7370+
7371+	dev_err(canvas->dev, "Canvas (%u) is busy\n", canvas_index);
7372+	return -EBUSY;
7373+}
7374+EXPORT_SYMBOL_GPL(meson_canvas_alloc_spec);
7375+
7376+
7377 int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index)
7378 {
7379 	unsigned long flags;
7380diff -Naur a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
7381--- a/drivers/spi/spi-meson-spicc.c	2023-02-23 10:03:45.602260183 +0800
7382+++ b/drivers/spi/spi-meson-spicc.c	2023-02-23 17:05:24.131300901 +0800
7383@@ -156,7 +156,6 @@
7384 	void __iomem			*base;
7385 	struct clk			*core;
7386 	struct clk			*pclk;
7387-	struct clk_divider		pow2_div;
7388 	struct clk			*clk;
7389 	struct spi_message		*message;
7390 	struct spi_transfer		*xfer;
7391@@ -169,8 +168,6 @@
7392 	unsigned long			xfer_remain;
7393 };
7394
7395-#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
7396-
7397 static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
7398 {
7399 	u32 conf;
7400@@ -424,7 +421,7 @@
7401 {
7402 	struct meson_spicc_device *spicc = spi_master_get_devdata(master);
7403 	struct spi_device *spi = message->spi;
7404-	u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
7405+	u32 conf = 0;
7406
7407 	/* Store current message */
7408 	spicc->message = message;
7409@@ -461,6 +458,8 @@
7410 	/* Select CS */
7411 	conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
7412
7413+	/* Default Clock rate core/4 */
7414+
7415 	/* Default 8bit word */
7416 	conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
7417
7418@@ -477,16 +476,12 @@
7419 static int meson_spicc_unprepare_transfer(struct spi_master *master)
7420 {
7421 	struct meson_spicc_device *spicc = spi_master_get_devdata(master);
7422-	u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK;
7423
7424 	/* Disable all IRQs */
7425 	writel(0, spicc->base + SPICC_INTREG);
7426
7427 	device_reset_optional(&spicc->pdev->dev);
7428
7429-	/* Set default configuration, keeping datarate field */
7430-	writel_relaxed(conf, spicc->base + SPICC_CONREG);
7431-
7432 	return 0;
7433 }
7434
7435@@ -523,60 +518,14 @@
7436  * Clk path for G12A series:
7437  *    pclk -> pow2 fixed div -> pow2 div -> mux -> out
7438  *    pclk -> enh fixed div -> enh div -> mux -> out
7439- *
7440- * The pow2 divider is tied to the controller HW state, and the
7441- * divider is only valid when the controller is initialized.
7442- *
7443- * A set of clock ops is added to make sure we don't read/set this
7444- * clock rate while the controller is in an unknown state.
7445  */
7446
7447-static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
7448-						  unsigned long parent_rate)
7449-{
7450-	struct clk_divider *divider = to_clk_divider(hw);
7451-	struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
7452-
7453-	if (!spicc->master->cur_msg)
7454-		return 0;
7455-
7456-	return clk_divider_ops.recalc_rate(hw, parent_rate);
7457-}
7458-
7459-static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
7460-					   struct clk_rate_request *req)
7461-{
7462-	struct clk_divider *divider = to_clk_divider(hw);
7463-	struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
7464-
7465-	if (!spicc->master->cur_msg)
7466-		return -EINVAL;
7467-
7468-	return clk_divider_ops.determine_rate(hw, req);
7469-}
7470-
7471-static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
7472-				     unsigned long parent_rate)
7473-{
7474-	struct clk_divider *divider = to_clk_divider(hw);
7475-	struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
7476-
7477-	if (!spicc->master->cur_msg)
7478-		return -EINVAL;
7479-
7480-	return clk_divider_ops.set_rate(hw, rate, parent_rate);
7481-}
7482-
7483-const struct clk_ops meson_spicc_pow2_clk_ops = {
7484-	.recalc_rate = meson_spicc_pow2_recalc_rate,
7485-	.determine_rate = meson_spicc_pow2_determine_rate,
7486-	.set_rate = meson_spicc_pow2_set_rate,
7487-};
7488-
7489-static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc)
7490+static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
7491 {
7492 	struct device *dev = &spicc->pdev->dev;
7493-	struct clk_fixed_factor *pow2_fixed_div;
7494+	struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
7495+	struct clk_divider *pow2_div, *enh_div;
7496+	struct clk_mux *mux;
7497 	struct clk_init_data init;
7498 	struct clk *clk;
7499 	struct clk_parent_data parent_data[2];
7500@@ -611,45 +560,31 @@
7501 	if (WARN_ON(IS_ERR(clk)))
7502 		return PTR_ERR(clk);
7503
7504+	pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
7505+	if (!pow2_div)
7506+		return -ENOMEM;
7507+
7508 	snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
7509 	init.name = name;
7510-	init.ops = &meson_spicc_pow2_clk_ops;
7511-	/*
7512-	 * Set NOCACHE here to make sure we read the actual HW value
7513-	 * since we reset the HW after each transfer.
7514-	 */
7515-	init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
7516+	init.ops = &clk_divider_ops;
7517+	init.flags = CLK_SET_RATE_PARENT;
7518 	parent_data[0].hw = &pow2_fixed_div->hw;
7519 	init.num_parents = 1;
7520
7521-	spicc->pow2_div.shift = 16,
7522-	spicc->pow2_div.width = 3,
7523-	spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO,
7524-	spicc->pow2_div.reg = spicc->base + SPICC_CONREG;
7525-	spicc->pow2_div.hw.init = &init;
7526-
7527-	spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw);
7528-	if (WARN_ON(IS_ERR(spicc->clk)))
7529-		return PTR_ERR(spicc->clk);
7530-
7531-	return 0;
7532-}
7533-
7534-static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc)
7535-{
7536-	struct device *dev = &spicc->pdev->dev;
7537-	struct clk_fixed_factor *enh_fixed_div;
7538-	struct clk_divider *enh_div;
7539-	struct clk_mux *mux;
7540-	struct clk_init_data init;
7541-	struct clk *clk;
7542-	struct clk_parent_data parent_data[2];
7543-	char name[64];
7544+	pow2_div->shift = 16,
7545+	pow2_div->width = 3,
7546+	pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
7547+	pow2_div->reg = spicc->base + SPICC_CONREG;
7548+	pow2_div->hw.init = &init;
7549
7550-	memset(&init, 0, sizeof(init));
7551-	memset(&parent_data, 0, sizeof(parent_data));
7552+	clk = devm_clk_register(dev, &pow2_div->hw);
7553+	if (WARN_ON(IS_ERR(clk)))
7554+		return PTR_ERR(clk);
7555
7556-	init.parent_data = parent_data;
7557+	if (!spicc->data->has_enhance_clk_div) {
7558+		spicc->clk = clk;
7559+		return 0;
7560+	}
7561
7562 	/* algorithm for enh div: rate = freq / 2 / (N + 1) */
7563
7564@@ -702,7 +637,7 @@
7565 	snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
7566 	init.name = name;
7567 	init.ops = &clk_mux_ops;
7568-	parent_data[0].hw = &spicc->pow2_div.hw;
7569+	parent_data[0].hw = &pow2_div->hw;
7570 	parent_data[1].hw = &enh_div->hw;
7571 	init.num_parents = 2;
7572 	init.flags = CLK_SET_RATE_PARENT;
7573@@ -758,11 +693,6 @@
7574 	writel_relaxed(0, spicc->base + SPICC_INTREG);
7575
7576 	irq = platform_get_irq(pdev, 0);
7577-	if (irq < 0) {
7578-		ret = irq;
7579-		goto out_master;
7580-	}
7581-
7582 	ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
7583 			       0, NULL, spicc);
7584 	if (ret) {
7585@@ -819,20 +749,12 @@
7586
7587 	meson_spicc_oen_enable(spicc);
7588
7589-	ret = meson_spicc_pow2_clk_init(spicc);
7590+	ret = meson_spicc_clk_init(spicc);
7591 	if (ret) {
7592-		dev_err(&pdev->dev, "pow2 clock registration failed\n");
7593+		dev_err(&pdev->dev, "clock registration failed\n");
7594 		goto out_clk;
7595 	}
7596
7597-	if (spicc->data->has_enhance_clk_div) {
7598-		ret = meson_spicc_enh_clk_init(spicc);
7599-		if (ret) {
7600-			dev_err(&pdev->dev, "clock registration failed\n");
7601-			goto out_clk;
7602-		}
7603-	}
7604-
7605 	ret = devm_spi_register_master(&pdev->dev, master);
7606 	if (ret) {
7607 		dev_err(&pdev->dev, "spi master registration failed\n");
7608diff -Naur a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
7609--- a/drivers/usb/dwc3/dwc3-meson-g12a.c	2022-12-19 17:13:14.765544011 +0800
7610+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c	2023-02-23 17:02:04.963751125 +0800
7611@@ -269,6 +269,59 @@
7612 	const struct dwc3_meson_g12a_drvdata *drvdata;
7613 };
7614
7615+union u2p_r0_v2 {
7616+	/** raw register data */
7617+	uint32_t d32;
7618+	/** register bits */
7619+	struct {
7620+		unsigned host_device:1;
7621+		unsigned power_ok:1;
7622+		unsigned hast_mode:1;
7623+		unsigned power_on_reset:1;
7624+		unsigned id_pullup:1;
7625+		unsigned drv_vbus:1;
7626+		unsigned reserved:26;
7627+	} b;
7628+};
7629+
7630+static ssize_t phy_meson_g12a_usb_mode_show(struct device *dev,
7631+				struct device_attribute *attr,
7632+				char *buf)
7633+{
7634+    struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
7635+    union u2p_r0_v2 reg0;
7636+
7637+    regmap_read(priv->u2p_regmap[1], U2P_R0, &reg0.d32);
7638+    printk(KERN_INFO "USB Mode is %s\n", reg0.b.id_pullup ? "OTG" : "Host");
7639+
7640+	return 0;
7641+}
7642+
7643+static ssize_t phy_meson_g12a_usb_mode_store(struct device *dev,
7644+				struct device_attribute *attr,
7645+				const char *buf, size_t count)
7646+{
7647+    struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
7648+	int id_pullup = 0;
7649+
7650+    id_pullup = simple_strtoul(buf, NULL, 16);
7651+    printk(KERN_INFO "USB Mode : %s\n", id_pullup ? "OTG" : "Host");
7652+
7653+    if (id_pullup) {
7654+        regmap_update_bits(priv->u2p_regmap[1], U2P_R0,
7655+                    U2P_R0_HOST_DEVICE | U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
7656+                    U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
7657+    } else {
7658+        regmap_update_bits(priv->u2p_regmap[1], U2P_R0,
7659+                    U2P_R0_HOST_DEVICE | U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
7660+                    U2P_R0_HOST_DEVICE | U2P_R0_DRV_VBUS);
7661+    }
7662+
7663+	return count;
7664+}
7665+
7666+static DEVICE_ATTR(usb_mode, 0664, phy_meson_g12a_usb_mode_show, phy_meson_g12a_usb_mode_store);
7667+
7668 static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
7669 					 int i, enum phy_mode mode)
7670 {
7671@@ -436,6 +489,10 @@
7672
7673 	dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
7674
7675+    ret = device_create_file(priv->dev, &dev_attr_usb_mode);
7676+    if (ret != 0)
7677+        dev_err(priv->dev, "failed create usb mode file\n");
7678+
7679 	return 0;
7680 }
7681
7682diff -Naur a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
7683--- a/drivers/usb/gadget/composite.c	2022-12-19 17:13:14.769544059 +0800
7684+++ b/drivers/usb/gadget/composite.c	2023-02-23 17:02:04.963751125 +0800
7685@@ -2061,7 +2061,7 @@
7686 	return value;
7687 }
7688
7689-void composite_disconnect(struct usb_gadget *gadget)
7690+static void __composite_disconnect(struct usb_gadget *gadget)
7691 {
7692 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
7693 	unsigned long			flags;
7694@@ -2078,6 +2078,23 @@
7695 	spin_unlock_irqrestore(&cdev->lock, flags);
7696 }
7697
7698+void composite_disconnect(struct usb_gadget *gadget)
7699+{
7700+	usb_gadget_vbus_draw(gadget, 0);
7701+	__composite_disconnect(gadget);
7702+}
7703+
7704+void composite_reset(struct usb_gadget *gadget)
7705+{
7706+	/*
7707+	 * Section 1.4.13 Standard Downstream Port of the USB battery charging
7708+	 * specification v1.2 states that a device connected on a SDP shall only
7709+	 * draw at max 100mA while in a connected, but unconfigured state.
7710+	 */
7711+	usb_gadget_vbus_draw(gadget, 100);
7712+	__composite_disconnect(gadget);
7713+}
7714+
7715 /*-------------------------------------------------------------------------*/
7716
7717 static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
7718@@ -2398,7 +2415,7 @@
7719 	.unbind		= composite_unbind,
7720
7721 	.setup		= composite_setup,
7722-	.reset		= composite_disconnect,
7723+	.reset		= composite_reset,
7724 	.disconnect	= composite_disconnect,
7725
7726 	.suspend	= composite_suspend,
7727diff -Naur a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
7728--- a/drivers/usb/gadget/configfs.c	2022-12-19 17:13:14.769544059 +0800
7729+++ b/drivers/usb/gadget/configfs.c	2023-02-23 17:02:04.963751125 +0800
7730@@ -10,6 +10,32 @@
7731 #include "u_f.h"
7732 #include "u_os_desc.h"
7733
7734+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7735+#include <linux/platform_device.h>
7736+#include <linux/kdev_t.h>
7737+#include <linux/usb/ch9.h>
7738+
7739+#ifdef CONFIG_USB_CONFIGFS_F_ACC
7740+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
7741+				const struct usb_ctrlrequest *ctrl);
7742+void acc_disconnect(void);
7743+#endif
7744+static struct class *android_class;
7745+static struct device *android_device;
7746+static int index;
7747+static int gadget_index;
7748+
7749+struct device *create_function_device(char *name)
7750+{
7751+	if (android_device && !IS_ERR(android_device))
7752+		return device_create(android_class, android_device,
7753+			MKDEV(0, index++), NULL, name);
7754+	else
7755+		return ERR_PTR(-EINVAL);
7756+}
7757+EXPORT_SYMBOL_GPL(create_function_device);
7758+#endif
7759+
7760 int check_user_usb_string(const char *name,
7761 		struct usb_gadget_strings *stringtab_dev)
7762 {
7763@@ -51,6 +77,12 @@
7764 	char qw_sign[OS_STRING_QW_SIGN_LEN];
7765 	spinlock_t spinlock;
7766 	bool unbind;
7767+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7768+	bool connected;
7769+	bool sw_connected;
7770+	struct work_struct work;
7771+	struct device *dev;
7772+#endif
7773 };
7774
7775 static inline struct gadget_info *to_gadget_info(struct config_item *item)
7776@@ -272,7 +304,7 @@
7777
7778 	mutex_lock(&gi->lock);
7779
7780-	if (!strlen(name)) {
7781+	if (!strlen(name) || strcmp(name, "none") == 0) {
7782 		ret = unregister_gadget(gi);
7783 		if (ret)
7784 			goto err;
7785@@ -1270,6 +1302,9 @@
7786 					f->name, f);
7787 				f->unbind(c, f);
7788 			}
7789+
7790+			if (f->bind_deactivated)
7791+				usb_function_activate(f);
7792 		}
7793 		c->next_interface_id = 0;
7794 		memset(c->interface, 0, sizeof(c->interface));
7795@@ -1422,6 +1457,57 @@
7796 	return ret;
7797 }
7798
7799+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7800+static void android_work(struct work_struct *data)
7801+{
7802+	struct gadget_info *gi = container_of(data, struct gadget_info, work);
7803+	struct usb_composite_dev *cdev = &gi->cdev;
7804+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
7805+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
7806+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
7807+	/* 0-connected 1-configured 2-disconnected*/
7808+	bool status[3] = { false, false, false };
7809+	unsigned long flags;
7810+	bool uevent_sent = false;
7811+
7812+	spin_lock_irqsave(&cdev->lock, flags);
7813+	if (cdev->config)
7814+		status[1] = true;
7815+
7816+	if (gi->connected != gi->sw_connected) {
7817+		if (gi->connected)
7818+			status[0] = true;
7819+		else
7820+			status[2] = true;
7821+		gi->sw_connected = gi->connected;
7822+	}
7823+	spin_unlock_irqrestore(&cdev->lock, flags);
7824+
7825+	if (status[0]) {
7826+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected);
7827+		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
7828+		uevent_sent = true;
7829+	}
7830+
7831+	if (status[1]) {
7832+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, configured);
7833+		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
7834+		uevent_sent = true;
7835+	}
7836+
7837+	if (status[2]) {
7838+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected);
7839+		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
7840+		uevent_sent = true;
7841+	}
7842+
7843+	if (!uevent_sent) {
7844+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
7845+			gi->connected, gi->sw_connected, cdev->config);
7846+	}
7847+}
7848+#endif
7849+
7850 static void configfs_composite_unbind(struct usb_gadget *gadget)
7851 {
7852 	struct usb_composite_dev	*cdev;
7853@@ -1449,6 +1535,51 @@
7854 	spin_unlock_irqrestore(&gi->spinlock, flags);
7855 }
7856
7857+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7858+static int android_setup(struct usb_gadget *gadget,
7859+			const struct usb_ctrlrequest *c)
7860+{
7861+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
7862+	unsigned long flags;
7863+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
7864+	int value = -EOPNOTSUPP;
7865+	struct usb_function_instance *fi;
7866+
7867+	spin_lock_irqsave(&cdev->lock, flags);
7868+	if (c->bRequest == USB_REQ_GET_DESCRIPTOR &&
7869+	    (c->wValue >> 8) == USB_DT_CONFIG && !gi->connected) {
7870+		gi->connected = 1;
7871+		schedule_work(&gi->work);
7872+	}
7873+	spin_unlock_irqrestore(&cdev->lock, flags);
7874+	list_for_each_entry(fi, &gi->available_func, cfs_list) {
7875+		if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) {
7876+			value = fi->f->setup(fi->f, c);
7877+			if (value >= 0)
7878+				break;
7879+		}
7880+	}
7881+
7882+#ifdef CONFIG_USB_CONFIGFS_F_ACC
7883+	if (value < 0)
7884+		value = acc_ctrlrequest(cdev, c);
7885+#endif
7886+
7887+	if (value < 0)
7888+		value = composite_setup(gadget, c);
7889+
7890+	spin_lock_irqsave(&cdev->lock, flags);
7891+	if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
7892+						cdev->config) {
7893+		schedule_work(&gi->work);
7894+	}
7895+	spin_unlock_irqrestore(&cdev->lock, flags);
7896+
7897+	return value;
7898+}
7899+
7900+#else // CONFIG_USB_CONFIGFS_UEVENT
7901+
7902 static int configfs_composite_setup(struct usb_gadget *gadget,
7903 		const struct usb_ctrlrequest *ctrl)
7904 {
7905@@ -1474,6 +1605,8 @@
7906 	return ret;
7907 }
7908
7909+#endif // CONFIG_USB_CONFIGFS_UEVENT
7910+
7911 static void configfs_composite_disconnect(struct usb_gadget *gadget)
7912 {
7913 	struct usb_composite_dev *cdev;
7914@@ -1484,6 +1617,14 @@
7915 	if (!cdev)
7916 		return;
7917
7918+#ifdef CONFIG_USB_CONFIGFS_F_ACC
7919+	/*
7920+	 * accessory HID support can be active while the
7921+	 * accessory function is not actually enabled,
7922+	 * so we need to inform it when we are disconnected.
7923+	 */
7924+	acc_disconnect();
7925+#endif
7926 	gi = container_of(cdev, struct gadget_info, cdev);
7927 	spin_lock_irqsave(&gi->spinlock, flags);
7928 	cdev = get_gadget_data(gadget);
7929@@ -1492,10 +1633,36 @@
7930 		return;
7931 	}
7932
7933+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7934+	gi->connected = 0;
7935+	schedule_work(&gi->work);
7936+#endif
7937 	composite_disconnect(gadget);
7938 	spin_unlock_irqrestore(&gi->spinlock, flags);
7939 }
7940
7941+static void configfs_composite_reset(struct usb_gadget *gadget)
7942+{
7943+	struct usb_composite_dev *cdev;
7944+	struct gadget_info *gi;
7945+	unsigned long flags;
7946+
7947+	cdev = get_gadget_data(gadget);
7948+	if (!cdev)
7949+		return;
7950+
7951+	gi = container_of(cdev, struct gadget_info, cdev);
7952+	spin_lock_irqsave(&gi->spinlock, flags);
7953+	cdev = get_gadget_data(gadget);
7954+	if (!cdev || gi->unbind) {
7955+		spin_unlock_irqrestore(&gi->spinlock, flags);
7956+		return;
7957+	}
7958+
7959+	composite_reset(gadget);
7960+	spin_unlock_irqrestore(&gi->spinlock, flags);
7961+}
7962+
7963 static void configfs_composite_suspend(struct usb_gadget *gadget)
7964 {
7965 	struct usb_composite_dev *cdev;
7966@@ -1544,10 +1711,13 @@
7967 	.bind           = configfs_composite_bind,
7968 	.unbind         = configfs_composite_unbind,
7969
7970+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7971+	.setup          = android_setup,
7972+#else
7973 	.setup          = configfs_composite_setup,
7974-	.reset          = configfs_composite_disconnect,
7975+#endif
7976+	.reset          = configfs_composite_reset,
7977 	.disconnect     = configfs_composite_disconnect,
7978-
7979 	.suspend	= configfs_composite_suspend,
7980 	.resume		= configfs_composite_resume,
7981
7982@@ -1559,6 +1729,91 @@
7983 	.match_existing_only = 1,
7984 };
7985
7986+#ifdef CONFIG_USB_CONFIGFS_UEVENT
7987+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
7988+			char *buf)
7989+{
7990+	struct gadget_info *dev = dev_get_drvdata(pdev);
7991+	struct usb_composite_dev *cdev;
7992+	char *state = "DISCONNECTED";
7993+	unsigned long flags;
7994+
7995+	if (!dev)
7996+		goto out;
7997+
7998+	cdev = &dev->cdev;
7999+
8000+	if (!cdev)
8001+		goto out;
8002+
8003+	spin_lock_irqsave(&cdev->lock, flags);
8004+	if (cdev->config)
8005+		state = "CONFIGURED";
8006+	else if (dev->connected)
8007+		state = "CONNECTED";
8008+	spin_unlock_irqrestore(&cdev->lock, flags);
8009+out:
8010+	return sprintf(buf, "%s\n", state);
8011+}
8012+
8013+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
8014+
8015+static struct device_attribute *android_usb_attributes[] = {
8016+	&dev_attr_state,
8017+	NULL
8018+};
8019+
8020+static int android_device_create(struct gadget_info *gi)
8021+{
8022+	struct device_attribute **attrs;
8023+	struct device_attribute *attr;
8024+
8025+	INIT_WORK(&gi->work, android_work);
8026+	gi->dev = device_create(android_class, NULL,
8027+			MKDEV(0, 0), NULL, "android%d", gadget_index++);
8028+	if (IS_ERR(gi->dev))
8029+		return PTR_ERR(gi->dev);
8030+
8031+	dev_set_drvdata(gi->dev, gi);
8032+	if (!android_device)
8033+		android_device = gi->dev;
8034+
8035+	attrs = android_usb_attributes;
8036+	while ((attr = *attrs++)) {
8037+		int err;
8038+
8039+		err = device_create_file(gi->dev, attr);
8040+		if (err) {
8041+			device_destroy(gi->dev->class,
8042+				       gi->dev->devt);
8043+			return err;
8044+		}
8045+	}
8046+
8047+	return 0;
8048+}
8049+
8050+static void android_device_destroy(struct gadget_info *gi)
8051+{
8052+	struct device_attribute **attrs;
8053+	struct device_attribute *attr;
8054+
8055+	attrs = android_usb_attributes;
8056+	while ((attr = *attrs++))
8057+		device_remove_file(gi->dev, attr);
8058+	device_destroy(gi->dev->class, gi->dev->devt);
8059+}
8060+#else
8061+static inline int android_device_create(struct gadget_info *gi)
8062+{
8063+	return 0;
8064+}
8065+
8066+static inline void android_device_destroy(struct gadget_info *gi)
8067+{
8068+}
8069+#endif
8070+
8071 static struct config_group *gadgets_make(
8072 		struct config_group *group,
8073 		const char *name)
8074@@ -1611,7 +1866,11 @@
8075 	if (!gi->composite.gadget_driver.function)
8076 		goto err;
8077
8078+	if (android_device_create(gi) < 0)
8079+		goto err;
8080+
8081 	return &gi->group;
8082+
8083 err:
8084 	kfree(gi);
8085 	return ERR_PTR(-ENOMEM);
8086@@ -1619,7 +1878,11 @@
8087
8088 static void gadgets_drop(struct config_group *group, struct config_item *item)
8089 {
8090+	struct gadget_info *gi;
8091+
8092+	gi = container_of(to_config_group(item), struct gadget_info, group);
8093 	config_item_put(item);
8094+	android_device_destroy(gi);
8095 }
8096
8097 static struct configfs_group_operations gadgets_ops = {
8098@@ -1659,6 +1922,13 @@
8099 	config_group_init(&gadget_subsys.su_group);
8100
8101 	ret = configfs_register_subsystem(&gadget_subsys);
8102+
8103+#ifdef CONFIG_USB_CONFIGFS_UEVENT
8104+	android_class = class_create(THIS_MODULE, "android_usb");
8105+	if (IS_ERR(android_class))
8106+		return PTR_ERR(android_class);
8107+#endif
8108+
8109 	return ret;
8110 }
8111 module_init(gadget_cfs_init);
8112@@ -1666,5 +1936,10 @@
8113 static void __exit gadget_cfs_exit(void)
8114 {
8115 	configfs_unregister_subsystem(&gadget_subsys);
8116+#ifdef CONFIG_USB_CONFIGFS_UEVENT
8117+	if (!IS_ERR(android_class))
8118+		class_destroy(android_class);
8119+#endif
8120+
8121 }
8122 module_exit(gadget_cfs_exit);
8123diff -Naur a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
8124--- a/drivers/usb/gadget/Kconfig	2022-12-19 17:13:14.769544059 +0800
8125+++ b/drivers/usb/gadget/Kconfig	2023-02-23 17:02:04.963751125 +0800
8126@@ -216,6 +216,12 @@
8127 config USB_F_TCM
8128 	tristate
8129
8130+config USB_F_ACC
8131+	tristate
8132+
8133+config USB_F_AUDIO_SRC
8134+	tristate
8135+
8136 # this first set of drivers all depend on bulk-capable hardware.
8137
8138 config USB_CONFIGFS
8139@@ -230,6 +236,14 @@
8140 	  appropriate symbolic links.
8141 	  For more information see Documentation/usb/gadget_configfs.rst.
8142
8143+config USB_CONFIGFS_UEVENT
8144+	bool "Uevent notification of Gadget state"
8145+	depends on USB_CONFIGFS
8146+	help
8147+	  Enable uevent notifications to userspace when the gadget
8148+	  state changes. The gadget can be in any of the following
8149+	  three states: "CONNECTED/DISCONNECTED/CONFIGURED"
8150+
8151 config USB_CONFIGFS_SERIAL
8152 	bool "Generic serial bulk in/out"
8153 	depends on USB_CONFIGFS
8154@@ -371,6 +385,23 @@
8155 	  implemented in kernel space (for instance Ethernet, serial or
8156 	  mass storage) and other are implemented in user space.
8157
8158+config USB_CONFIGFS_F_ACC
8159+	bool "Accessory gadget"
8160+	depends on USB_CONFIGFS
8161+	depends on HID=y
8162+	select USB_F_ACC
8163+	help
8164+	  USB gadget Accessory support
8165+
8166+config USB_CONFIGFS_F_AUDIO_SRC
8167+	bool "Audio Source gadget"
8168+	depends on USB_CONFIGFS
8169+	depends on SND
8170+	select SND_PCM
8171+	select USB_F_AUDIO_SRC
8172+	help
8173+	  USB gadget Audio Source support
8174+
8175 config USB_CONFIGFS_F_UAC1
8176 	bool "Audio Class 1.0"
8177 	depends on USB_CONFIGFS
8178diff -Naur a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
8179--- a/drivers/usb/serial/ch341.c	2022-12-19 17:13:14.821544682 +0800
8180+++ b/drivers/usb/serial/ch341.c	2023-02-23 17:02:04.963751125 +0800
8181@@ -84,6 +84,7 @@
8182 	{ USB_DEVICE(0x1a86, 0x5523) },
8183 	{ USB_DEVICE(0x1a86, 0x7522) },
8184 	{ USB_DEVICE(0x1a86, 0x7523) },
8185+	{ USB_DEVICE(0x1a86, 0xe019) },
8186 	{ USB_DEVICE(0x2184, 0x0057) },
8187 	{ USB_DEVICE(0x4348, 0x5523) },
8188 	{ USB_DEVICE(0x9986, 0x7523) },
8189diff -Naur a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
8190--- a/drivers/video/fbdev/core/fbcon.c	2022-12-19 17:13:14.865545209 +0800
8191+++ b/drivers/video/fbdev/core/fbcon.c	2023-02-23 17:02:04.967751180 +0800
8192@@ -365,6 +365,7 @@
8193
8194 static void fb_flashcursor(struct work_struct *work)
8195 {
8196+#ifdef CONSOLE_CURSOR_ON
8197 	struct fb_info *info = container_of(work, struct fb_info, queue);
8198 	struct fbcon_ops *ops = info->fbcon_par;
8199 	struct vc_data *vc = NULL;
8200@@ -395,6 +396,7 @@
8201 	ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
8202 		    get_color(vc, info, c, 0));
8203 	console_unlock();
8204+#endif
8205 }
8206
8207 static void cursor_timer_handler(struct timer_list *t)
8208@@ -1331,6 +1333,7 @@
8209
8210 static void fbcon_cursor(struct vc_data *vc, int mode)
8211 {
8212+#ifdef CONSOLE_CURSOR_ON
8213 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
8214 	struct fbcon_ops *ops = info->fbcon_par;
8215  	int c = scr_readw((u16 *) vc->vc_pos);
8216@@ -1352,6 +1355,7 @@
8217
8218 	ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
8219 		    get_color(vc, info, c, 0));
8220+#endif
8221 }
8222
8223 static int scrollback_phys_max = 0;
8224diff -Naur a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
8225--- a/drivers/video/fbdev/core/fbmem.c	2022-12-19 17:13:14.865545209 +0800
8226+++ b/drivers/video/fbdev/core/fbmem.c	2023-02-23 17:02:04.967751180 +0800
8227@@ -53,9 +53,9 @@
8228 int num_registered_fb __read_mostly;
8229 EXPORT_SYMBOL(num_registered_fb);
8230
8231-bool fb_center_logo __read_mostly;
8232+bool fb_center_logo = true;
8233
8234-int fb_logo_count __read_mostly = -1;
8235+int fb_logo_count = 1;
8236
8237 static struct fb_info *get_fb_info(unsigned int idx)
8238 {
8239diff -Naur a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
8240--- a/include/drm/bridge/dw_hdmi.h	2022-12-19 17:13:15.185549044 +0800
8241+++ b/include/drm/bridge/dw_hdmi.h	2023-02-23 17:02:04.967751180 +0800
8242@@ -126,6 +126,8 @@
8243 struct dw_hdmi_plat_data {
8244 	struct regmap *regm;
8245
8246+	unsigned int output_port;
8247+
8248 	unsigned long input_bus_encoding;
8249 	bool use_drm_infoframe;
8250 	bool ycbcr_420_allowed;
8251@@ -153,6 +155,8 @@
8252 	const struct dw_hdmi_phy_config *phy_config;
8253 	int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
8254 			     unsigned long mpixelclock);
8255+
8256+	unsigned int disable_cec : 1;
8257 };
8258
8259 struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
8260diff -Naur a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
8261--- a/include/drm/bridge/dw_mipi_dsi.h	2022-12-19 17:13:15.185549044 +0800
8262+++ b/include/drm/bridge/dw_mipi_dsi.h	2023-02-23 17:02:04.967751180 +0800
8263@@ -51,7 +51,9 @@
8264 	unsigned int max_data_lanes;
8265
8266 	enum drm_mode_status (*mode_valid)(void *priv_data,
8267-					   const struct drm_display_mode *mode);
8268+					   const struct drm_display_mode *mode,
8269+					   unsigned long mode_flags,
8270+					   u32 lanes, u32 format);
8271
8272 	const struct dw_mipi_dsi_phy_ops *phy_ops;
8273 	const struct dw_mipi_dsi_host_ops *host_ops;
8274diff -Naur a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
8275--- a/include/drm/drm_aperture.h	1970-01-01 08:00:00.000000000 +0800
8276+++ b/include/drm/drm_aperture.h	2023-02-23 17:02:04.967751180 +0800
8277@@ -0,0 +1,39 @@
8278+/* SPDX-License-Identifier: MIT */
8279+
8280+#ifndef _DRM_APERTURE_H_
8281+#define _DRM_APERTURE_H_
8282+
8283+#include <linux/types.h>
8284+
8285+struct drm_device;
8286+struct drm_driver;
8287+struct pci_dev;
8288+
8289+int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
8290+					resource_size_t size);
8291+
8292+int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
8293+						 bool primary, const struct drm_driver *req_driver);
8294+
8295+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
8296+						     const struct drm_driver *req_driver);
8297+
8298+/**
8299+ * drm_aperture_remove_framebuffers - remove all existing framebuffers
8300+ * @primary: also kick vga16fb if present
8301+ * @req_driver: requesting DRM driver
8302+ *
8303+ * This function removes all graphics device drivers. Use this function on systems
8304+ * that can have their framebuffer located anywhere in memory.
8305+ *
8306+ * Returns:
8307+ * 0 on success, or a negative errno code otherwise
8308+ */
8309+static inline int
8310+drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
8311+{
8312+	return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
8313+							    req_driver);
8314+}
8315+
8316+#endif
8317diff -Naur a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
8318--- a/include/drm/drm_atomic_helper.h	2022-12-19 17:13:15.185549044 +0800
8319+++ b/include/drm/drm_atomic_helper.h	2023-02-23 17:02:04.967751180 +0800
8320@@ -167,7 +167,7 @@
8321 	drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
8322
8323 /**
8324- * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
8325+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
8326  * @plane: the loop cursor
8327  * @crtc_state: the incoming CRTC state
8328  *
8329@@ -180,7 +180,7 @@
8330 	drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
8331
8332 /**
8333- * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
8334+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
8335  * @plane: the loop cursor
8336  * @plane_state: loop cursor for the plane's state, must be const
8337  * @crtc_state: the incoming CRTC state
8338diff -Naur a/include/drm/drm_connector.h b/include/drm/drm_connector.h
8339--- a/include/drm/drm_connector.h	2022-12-19 17:13:15.185549044 +0800
8340+++ b/include/drm/drm_connector.h	2023-02-23 17:02:04.967751180 +0800
8341@@ -1622,6 +1622,9 @@
8342 					       u32 scaling_mode_mask);
8343 int drm_connector_attach_vrr_capable_property(
8344 		struct drm_connector *connector);
8345+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
8346+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
8347+					     struct drm_connector_state *new_state);
8348 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
8349 int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
8350 int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
8351diff -Naur a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
8352--- a/include/dt-bindings/clock/g12a-clkc.h	2022-12-19 17:13:15.193549139 +0800
8353+++ b/include/dt-bindings/clock/g12a-clkc.h	2023-02-23 17:02:04.967751180 +0800
8354@@ -147,5 +147,26 @@
8355 #define CLKID_SPICC1_SCLK			261
8356 #define CLKID_NNA_AXI_CLK			264
8357 #define CLKID_NNA_CORE_CLK			267
8358+#define CLKID_MIPI_DSI_PXCLK_SEL		269
8359+#define CLKID_MIPI_DSI_PXCLK			270
8360+#define CLKID_24M						271
8361+#define CLKID_MIPI_ISP_CLK_COMP			272
8362+#define CLKID_MIPI_CSI_PHY_CLK0_COMP	273
8363
8364+#define CLKID_VDEC_P0_COMP				274
8365+#define CLKID_VDEC_P1_COMP				275
8366+#define CLKID_VDEC_MUX					276
8367+#define CLKID_HCODEC_P0_COMP			277
8368+#define CLKID_HCODEC_P1_COMP			278
8369+#define CLKID_HCODEC_MUX				279
8370+#define CLKID_HEVC_P0_COMP				280
8371+#define CLKID_HEVC_P1_COMP				281
8372+#define CLKID_HEVC_MUX					282
8373+#define CLKID_HEVCF_P0_COMP				283
8374+#define CLKID_HEVCF_P1_COMP				284
8375+#define CLKID_HEVCF_MUX					285
8376+
8377+#define CLKID_VPU_CLKB_TMP_COMP			286
8378+#define CLKID_VPU_CLKB_COMP				287
8379+
8380 #endif /* __G12A_CLKC_H */
8381diff -Naur a/include/linux/mmc/card.h b/include/linux/mmc/card.h
8382--- a/include/linux/mmc/card.h	2022-12-19 17:13:15.285550241 +0800
8383+++ b/include/linux/mmc/card.h	2023-02-23 17:02:04.967751180 +0800
8384@@ -310,6 +310,7 @@
8385 	struct dentry		*debugfs_root;
8386 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
8387 	unsigned int    nr_parts;
8388+	unsigned int    key_stamp;
8389
8390 	unsigned int		bouncesz;	/* Bounce buffer size */
8391 	struct workqueue_struct *complete_wq;	/* Private workqueue */
8392diff -Naur a/include/linux/mmc/emmc_partitions.h b/include/linux/mmc/emmc_partitions.h
8393--- a/include/linux/mmc/emmc_partitions.h	1970-01-01 08:00:00.000000000 +0800
8394+++ b/include/linux/mmc/emmc_partitions.h	2023-02-23 17:02:04.967751180 +0800
8395@@ -0,0 +1,85 @@
8396+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
8397+/*
8398+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
8399+ */
8400+
8401+#ifndef _EMMC_PARTITIONS_H
8402+#define _EMMC_PARTITIONS_H
8403+
8404+#include<linux/genhd.h>
8405+
8406+#include <linux/mmc/host.h>
8407+#include <linux/mmc/card.h>
8408+#include <linux/mmc/mmc.h>
8409+#include <linux/mmc/core.h>
8410+
8411+/* #include <mach/register.h> */
8412+/* #include <mach/am_regs.h> */
8413+#define CONFIG_DTB_SIZE  (256 * 1024U)
8414+#define DTB_CELL_SIZE	(16 * 1024U)
8415+#define	STORE_CODE				1
8416+#define	STORE_CACHE				BIT(1)
8417+#define	STORE_DATA				BIT(2)
8418+
8419+#define     MAX_PART_NAME_LEN               16
8420+#define     MAX_MMC_PART_NUM                32
8421+
8422+/* MMC Partition Table */
8423+#define     MMC_PARTITIONS_MAGIC            "MPT"
8424+#define     MMC_RESERVED_NAME               "reserved"
8425+
8426+#define     SZ_1M                           0x00100000
8427+
8428+/* the size of bootloader partition */
8429+#define     MMC_BOOT_PARTITION_SIZE         (4 * SZ_1M)
8430+#define		MMC_TUNING_OFFSET               0X14400
8431+
8432+/* the size of reserve space behind bootloader partition */
8433+#define     MMC_BOOT_PARTITION_RESERVED     (32 * SZ_1M)
8434+
8435+#define     RESULT_OK                       0
8436+#define     RESULT_FAIL                     1
8437+#define     RESULT_UNSUP_HOST               2
8438+#define     RESULT_UNSUP_CARD               3
8439+
8440+struct partitions {
8441+	/* identifier string */
8442+	char name[MAX_PART_NAME_LEN];
8443+	/* partition size, byte unit */
8444+	u64 size;
8445+	/* offset within the master space, byte unit */
8446+	u64 offset;
8447+	/* master flags to mask out for this partition */
8448+	unsigned int mask_flags;
8449+};
8450+
8451+struct mmc_partitions_fmt {
8452+	char magic[4];
8453+	unsigned char version[12];
8454+	int part_num;
8455+	int checksum;
8456+	struct partitions partitions[MAX_MMC_PART_NUM];
8457+};
8458+
8459+/*#ifdef CONFIG_MMC_AML*/
8460+int aml_emmc_partition_ops(struct mmc_card *card, struct gendisk *disk);
8461+int add_fake_boot_partition(struct gendisk *disk, char *name, int idx);
8462+/*
8463+ *#else
8464+ *static inline int aml_emmc_partition_ops(struct mmc_card *card,
8465+ *					 struct gendisk *disk)
8466+ *{
8467+ *	return -1;
8468+ *}
8469+ *#endif
8470+ */
8471+unsigned int mmc_capacity(struct mmc_card *card);
8472+int mmc_read_internal(struct mmc_card *card,
8473+		      unsigned int dev_addr, unsigned int blocks, void *buf);
8474+int mmc_write_internal(struct mmc_card *card,
8475+		       unsigned int dev_addr, unsigned int blocks, void *buf);
8476+int get_reserve_partition_off_from_tbl(void);
8477+#endif
8478+
8479+extern struct mmc_partitions_fmt *pt_fmt;
8480+
8481diff -Naur a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h
8482--- a/include/linux/soc/amlogic/meson-canvas.h	2022-12-19 17:13:15.329550769 +0800
8483+++ b/include/linux/soc/amlogic/meson-canvas.h	2023-02-23 17:02:04.967751180 +0800
8484@@ -39,6 +39,14 @@
8485 int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index);
8486
8487 /**
8488+ * meson_canvas_alloc_spec() - take ownership of a canvas
8489+ *
8490+ * @canvas: canvas provider instance retrieved from meson_canvas_get()
8491+ * @canvas_index: the specified canvas index
8492+ */
8493+int meson_canvas_alloc_spec(struct meson_canvas *canvas, u8 canvas_index);
8494+
8495+/**
8496  * meson_canvas_free() - remove ownership from a canvas
8497  *
8498  * @canvas: canvas provider instance retrieved from meson_canvas_get()
8499diff -Naur a/include/linux/usb/composite.h b/include/linux/usb/composite.h
8500--- a/include/linux/usb/composite.h	2022-12-19 17:13:15.345550961 +0800
8501+++ b/include/linux/usb/composite.h	2023-02-23 17:02:04.967751180 +0800
8502@@ -525,6 +525,8 @@
8503 extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
8504
8505 extern void composite_disconnect(struct usb_gadget *gadget);
8506+extern void composite_reset(struct usb_gadget *gadget);
8507+
8508 extern int composite_setup(struct usb_gadget *gadget,
8509 		const struct usb_ctrlrequest *ctrl);
8510 extern void composite_suspend(struct usb_gadget *gadget);
8511@@ -590,6 +592,7 @@
8512 	struct config_group group;
8513 	struct list_head cfs_list;
8514 	struct usb_function_driver *fd;
8515+	struct usb_function *f;
8516 	int (*set_inst_name)(struct usb_function_instance *inst,
8517 			      const char *name);
8518 	void (*free_func_inst)(struct usb_function_instance *inst);
8519diff -Naur a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
8520--- a/kernel/dma/contiguous.c	2022-12-19 17:13:15.481552591 +0800
8521+++ b/kernel/dma/contiguous.c	2023-02-23 17:02:04.967751180 +0800
8522@@ -262,6 +262,7 @@
8523
8524 	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
8525 }
8526+EXPORT_SYMBOL(dma_alloc_from_contiguous);
8527
8528 /**
8529  * dma_release_from_contiguous() - release allocated pages
8530@@ -278,6 +279,7 @@
8531 {
8532 	return cma_release(dev_get_cma_area(dev), pages, count);
8533 }
8534+EXPORT_SYMBOL(dma_release_from_contiguous);
8535
8536 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
8537 {
8538diff -Naur a/kernel/sched/core.c b/kernel/sched/core.c
8539--- a/kernel/sched/core.c	2022-12-19 17:13:15.505552878 +0800
8540+++ b/kernel/sched/core.c	2023-02-23 17:02:04.967751180 +0800
8541@@ -5711,6 +5711,7 @@
8542 {
8543 	return _sched_setscheduler(p, policy, param, true);
8544 }
8545+EXPORT_SYMBOL_GPL(sched_setscheduler);
8546
8547 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
8548 {
8549diff -Naur a/Makefile b/Makefile
8550--- a/Makefile	2023-02-08 19:27:44.107724365 +0800
8551+++ b/Makefile	2023-02-23 17:02:04.971751235 +0800
8552@@ -491,6 +491,7 @@
8553 		-I$(objtree)/arch/$(SRCARCH)/include/generated \
8554 		$(if $(building_out_of_srctree),-I$(srctree)/include) \
8555 		-I$(objtree)/include \
8556+		-I$(objtree)/vendor/include \
8557 		$(USERINCLUDE)
8558
8559 KBUILD_AFLAGS   := -D__ASSEMBLY__ -fno-PIE
8560@@ -861,7 +862,7 @@
8561 endif
8562
8563 endif # CONFIG_DEBUG_INFO
8564-
8565+KBUILD_CFLAGS += -Wno-unused-function
8566 KBUILD_CFLAGS += $(DEBUG_CFLAGS)
8567 export DEBUG_CFLAGS
8568
8569@@ -987,7 +988,7 @@
8570 KBUILD_CFLAGS   += -Werror=date-time
8571
8572 # enforce correct pointer usage
8573-KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
8574+#KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
8575
8576 # Require designated initializers for all marked structures
8577 KBUILD_CFLAGS   += $(call cc-option,-Werror=designated-init)
8578@@ -1391,7 +1392,7 @@
8579 # Devicetree files
8580
8581 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
8582-dtstree := arch/$(SRCARCH)/boot/dts
8583+dtstree := vendor/arch/$(SRCARCH)/boot/dts
8584 endif
8585
8586 ifneq ($(dtstree),)
8587diff -Naur a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
8588--- a/drivers/dma-buf/heaps/Kconfig	2023-09-19 21:04:00.210684653 +0800
8589+++ b/drivers/dma-buf/heaps/Kconfig	2023-11-14 09:26:42.087243600 +0800
8590@@ -1,12 +1,22 @@
8591+menuconfig DMABUF_HEAPS_DEFERRED_FREE
8592+	bool "DMA-BUF heaps deferred-free library"
8593+	help
8594+	  Choose this option to enable the DMA-BUF heaps deferred-free library.
8595+
8596+menuconfig DMABUF_HEAPS_PAGE_POOL
8597+	bool "DMA-BUF heaps page-pool library"
8598+	help
8599+	  Choose this option to enable the DMA-BUF heaps page-pool library.
8600+
8601 config DMABUF_HEAPS_SYSTEM
8602-	bool "DMA-BUF System Heap"
8603-	depends on DMABUF_HEAPS
8604+	tristate "DMA-BUF System Heap"
8605+	depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE && DMABUF_HEAPS_PAGE_POOL
8606 	help
8607 	  Choose this option to enable the system dmabuf heap. The system heap
8608 	  is backed by pages from the buddy allocator. If in doubt, say Y.
8609
8610 config DMABUF_HEAPS_CMA
8611-	bool "DMA-BUF CMA Heap"
8612+	tristate "DMA-BUF CMA Heap"
8613 	depends on DMABUF_HEAPS && DMA_CMA
8614 	help
8615 	  Choose this option to enable dma-buf CMA heap. This heap is backed
8616diff -Naur a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
8617--- a/drivers/dma-buf/heaps/Makefile	2023-09-19 21:04:00.210684653 +0800
8618+++ b/drivers/dma-buf/heaps/Makefile	2023-11-14 09:26:42.087243600 +0800
8619@@ -1,4 +1,5 @@
8620 # SPDX-License-Identifier: GPL-2.0
8621-obj-y					+= heap-helpers.o
8622+obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
8623+obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL)	+= page_pool.o
8624 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
8625 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
8626diff -Naur a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
8627--- a/drivers/dma-buf/heaps/cma_heap.c	2023-09-19 21:04:00.210684653 +0800
8628+++ b/drivers/dma-buf/heaps/cma_heap.c	2023-11-14 09:26:42.087243600 +0800
8629@@ -2,76 +2,304 @@
8630 /*
8631  * DMABUF CMA heap exporter
8632  *
8633- * Copyright (C) 2012, 2019 Linaro Ltd.
8634+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
8635  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
8636+ *
8637+ * Also utilizing parts of Andrew Davis' SRAM heap:
8638+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
8639+ *	Andrew F. Davis <afd@ti.com>
8640  */
8641-
8642 #include <linux/cma.h>
8643-#include <linux/device.h>
8644 #include <linux/dma-buf.h>
8645 #include <linux/dma-heap.h>
8646 #include <linux/dma-map-ops.h>
8647 #include <linux/err.h>
8648-#include <linux/errno.h>
8649 #include <linux/highmem.h>
8650+#include <linux/io.h>
8651+#include <linux/mm.h>
8652 #include <linux/module.h>
8653-#include <linux/slab.h>
8654 #include <linux/scatterlist.h>
8655-#include <linux/sched/signal.h>
8656+#include <linux/slab.h>
8657+#include <linux/vmalloc.h>
8658
8659-#include "heap-helpers.h"
8660
8661 struct cma_heap {
8662 	struct dma_heap *heap;
8663 	struct cma *cma;
8664 };
8665
8666-static void cma_heap_free(struct heap_helper_buffer *buffer)
8667+struct cma_heap_buffer {
8668+	struct cma_heap *heap;
8669+	struct list_head attachments;
8670+	struct mutex lock;
8671+	unsigned long len;
8672+	struct page *cma_pages;
8673+	struct page **pages;
8674+	pgoff_t pagecount;
8675+	int vmap_cnt;
8676+	void *vaddr;
8677+};
8678+
8679+struct dma_heap_attachment {
8680+	struct device *dev;
8681+	struct sg_table table;
8682+	struct list_head list;
8683+	bool mapped;
8684+};
8685+
8686+static int cma_heap_attach(struct dma_buf *dmabuf,
8687+			   struct dma_buf_attachment *attachment)
8688 {
8689-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
8690-	unsigned long nr_pages = buffer->pagecount;
8691-	struct page *cma_pages = buffer->priv_virt;
8692+	struct cma_heap_buffer *buffer = dmabuf->priv;
8693+	struct dma_heap_attachment *a;
8694+	int ret;
8695+
8696+	a = kzalloc(sizeof(*a), GFP_KERNEL);
8697+	if (!a)
8698+		return -ENOMEM;
8699+
8700+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
8701+					buffer->pagecount, 0,
8702+					buffer->pagecount << PAGE_SHIFT,
8703+					GFP_KERNEL);
8704+	if (ret) {
8705+		kfree(a);
8706+		return ret;
8707+	}
8708+
8709+	a->dev = attachment->dev;
8710+	INIT_LIST_HEAD(&a->list);
8711+	a->mapped = false;
8712+
8713+	attachment->priv = a;
8714+
8715+	mutex_lock(&buffer->lock);
8716+	list_add(&a->list, &buffer->attachments);
8717+	mutex_unlock(&buffer->lock);
8718+
8719+	return 0;
8720+}
8721+
8722+static void cma_heap_detach(struct dma_buf *dmabuf,
8723+			    struct dma_buf_attachment *attachment)
8724+{
8725+	struct cma_heap_buffer *buffer = dmabuf->priv;
8726+	struct dma_heap_attachment *a = attachment->priv;
8727+
8728+	mutex_lock(&buffer->lock);
8729+	list_del(&a->list);
8730+	mutex_unlock(&buffer->lock);
8731+
8732+	sg_free_table(&a->table);
8733+	kfree(a);
8734+}
8735+
8736+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
8737+					     enum dma_data_direction direction)
8738+{
8739+	struct dma_heap_attachment *a = attachment->priv;
8740+	struct sg_table *table = &a->table;
8741+	int ret;
8742+
8743+	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
8744+	if (ret)
8745+		return ERR_PTR(-ENOMEM);
8746+	a->mapped = true;
8747+	return table;
8748+}
8749+
8750+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
8751+				   struct sg_table *table,
8752+				   enum dma_data_direction direction)
8753+{
8754+	struct dma_heap_attachment *a = attachment->priv;
8755+
8756+	a->mapped = false;
8757+	dma_unmap_sgtable(attachment->dev, table, direction, 0);
8758+}
8759+
8760+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
8761+					     enum dma_data_direction direction)
8762+{
8763+	struct cma_heap_buffer *buffer = dmabuf->priv;
8764+	struct dma_heap_attachment *a;
8765+
8766+	if (buffer->vmap_cnt)
8767+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
8768+
8769+	mutex_lock(&buffer->lock);
8770+	list_for_each_entry(a, &buffer->attachments, list) {
8771+		if (!a->mapped)
8772+			continue;
8773+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
8774+	}
8775+	mutex_unlock(&buffer->lock);
8776+
8777+	return 0;
8778+}
8779+
8780+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
8781+					   enum dma_data_direction direction)
8782+{
8783+	struct cma_heap_buffer *buffer = dmabuf->priv;
8784+	struct dma_heap_attachment *a;
8785+
8786+	if (buffer->vmap_cnt)
8787+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
8788+
8789+	mutex_lock(&buffer->lock);
8790+	list_for_each_entry(a, &buffer->attachments, list) {
8791+		if (!a->mapped)
8792+			continue;
8793+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
8794+	}
8795+	mutex_unlock(&buffer->lock);
8796+
8797+	return 0;
8798+}
8799+
8800+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
8801+{
8802+	struct vm_area_struct *vma = vmf->vma;
8803+	struct cma_heap_buffer *buffer = vma->vm_private_data;
8804+
8805+	if (vmf->pgoff > buffer->pagecount)
8806+		return VM_FAULT_SIGBUS;
8807+
8808+	vmf->page = buffer->pages[vmf->pgoff];
8809+	get_page(vmf->page);
8810+
8811+	return 0;
8812+}
8813+
8814+static const struct vm_operations_struct dma_heap_vm_ops = {
8815+	.fault = cma_heap_vm_fault,
8816+};
8817+
8818+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
8819+{
8820+	struct cma_heap_buffer *buffer = dmabuf->priv;
8821+
8822+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
8823+		return -EINVAL;
8824+
8825+	vma->vm_ops = &dma_heap_vm_ops;
8826+	vma->vm_private_data = buffer;
8827+
8828+	return 0;
8829+}
8830+
8831+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
8832+{
8833+	void *vaddr;
8834+
8835+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
8836+	if (!vaddr)
8837+		return ERR_PTR(-ENOMEM);
8838+
8839+	return vaddr;
8840+}
8841+
8842+static void *cma_heap_vmap(struct dma_buf *dmabuf)
8843+{
8844+	struct cma_heap_buffer *buffer = dmabuf->priv;
8845+	void *vaddr;
8846+
8847+	mutex_lock(&buffer->lock);
8848+	if (buffer->vmap_cnt) {
8849+		buffer->vmap_cnt++;
8850+		vaddr = buffer->vaddr;
8851+		goto out;
8852+	}
8853+
8854+	vaddr = cma_heap_do_vmap(buffer);
8855+	if (IS_ERR(vaddr))
8856+		goto out;
8857+
8858+	buffer->vaddr = vaddr;
8859+	buffer->vmap_cnt++;
8860+out:
8861+	mutex_unlock(&buffer->lock);
8862+
8863+	return vaddr;
8864+}
8865+
8866+static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
8867+{
8868+	struct cma_heap_buffer *buffer = dmabuf->priv;
8869+
8870+	mutex_lock(&buffer->lock);
8871+	if (!--buffer->vmap_cnt) {
8872+		vunmap(buffer->vaddr);
8873+		buffer->vaddr = NULL;
8874+	}
8875+	mutex_unlock(&buffer->lock);
8876+}
8877+
8878+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
8879+{
8880+	struct cma_heap_buffer *buffer = dmabuf->priv;
8881+	struct cma_heap *cma_heap = buffer->heap;
8882+
8883+	if (buffer->vmap_cnt > 0) {
8884+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
8885+		vunmap(buffer->vaddr);
8886+	}
8887
8888 	/* free page list */
8889 	kfree(buffer->pages);
8890 	/* release memory */
8891-	cma_release(cma_heap->cma, cma_pages, nr_pages);
8892+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
8893 	kfree(buffer);
8894 }
8895
8896-/* dmabuf heap CMA operations functions */
8897-static int cma_heap_allocate(struct dma_heap *heap,
8898-			     unsigned long len,
8899-			     unsigned long fd_flags,
8900-			     unsigned long heap_flags)
8901+static const struct dma_buf_ops cma_heap_buf_ops = {
8902+	.attach = cma_heap_attach,
8903+	.detach = cma_heap_detach,
8904+	.map_dma_buf = cma_heap_map_dma_buf,
8905+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
8906+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
8907+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
8908+	.mmap = cma_heap_mmap,
8909+	.vmap = cma_heap_vmap,
8910+	.vunmap = cma_heap_vunmap,
8911+	.release = cma_heap_dma_buf_release,
8912+};
8913+
8914+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
8915+					 unsigned long len,
8916+					 unsigned long fd_flags,
8917+					 unsigned long heap_flags)
8918 {
8919 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
8920-	struct heap_helper_buffer *helper_buffer;
8921-	struct page *cma_pages;
8922+	struct cma_heap_buffer *buffer;
8923+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
8924 	size_t size = PAGE_ALIGN(len);
8925-	unsigned long nr_pages = size >> PAGE_SHIFT;
8926+	pgoff_t pagecount = size >> PAGE_SHIFT;
8927 	unsigned long align = get_order(size);
8928+	struct page *cma_pages;
8929 	struct dma_buf *dmabuf;
8930 	int ret = -ENOMEM;
8931 	pgoff_t pg;
8932
8933+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
8934+	if (!buffer)
8935+		return ERR_PTR(-ENOMEM);
8936+
8937+	INIT_LIST_HEAD(&buffer->attachments);
8938+	mutex_init(&buffer->lock);
8939+	buffer->len = size;
8940+
8941 	if (align > CONFIG_CMA_ALIGNMENT)
8942 		align = CONFIG_CMA_ALIGNMENT;
8943
8944-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
8945-	if (!helper_buffer)
8946-		return -ENOMEM;
8947-
8948-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
8949-	helper_buffer->heap = heap;
8950-	helper_buffer->size = len;
8951-
8952-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
8953+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
8954 	if (!cma_pages)
8955-		goto free_buf;
8956+		goto free_buffer;
8957
8958+	/* Clear the cma pages */
8959 	if (PageHighMem(cma_pages)) {
8960-		unsigned long nr_clear_pages = nr_pages;
8961+		unsigned long nr_clear_pages = pagecount;
8962 		struct page *page = cma_pages;
8963
8964 		while (nr_clear_pages > 0) {
8965@@ -85,7 +313,6 @@
8966 			 */
8967 			if (fatal_signal_pending(current))
8968 				goto free_cma;
8969-
8970 			page++;
8971 			nr_clear_pages--;
8972 		}
8973@@ -93,44 +320,41 @@
8974 		memset(page_address(cma_pages), 0, size);
8975 	}
8976
8977-	helper_buffer->pagecount = nr_pages;
8978-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
8979-					     sizeof(*helper_buffer->pages),
8980-					     GFP_KERNEL);
8981-	if (!helper_buffer->pages) {
8982+	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
8983+	if (!buffer->pages) {
8984 		ret = -ENOMEM;
8985 		goto free_cma;
8986 	}
8987
8988-	for (pg = 0; pg < helper_buffer->pagecount; pg++)
8989-		helper_buffer->pages[pg] = &cma_pages[pg];
8990+	for (pg = 0; pg < pagecount; pg++)
8991+		buffer->pages[pg] = &cma_pages[pg];
8992+
8993+	buffer->cma_pages = cma_pages;
8994+	buffer->heap = cma_heap;
8995+	buffer->pagecount = pagecount;
8996
8997 	/* create the dmabuf */
8998-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
8999+	exp_info.exp_name = dma_heap_get_name(heap);
9000+	exp_info.ops = &cma_heap_buf_ops;
9001+	exp_info.size = buffer->len;
9002+	exp_info.flags = fd_flags;
9003+	exp_info.priv = buffer;
9004+	dmabuf = dma_buf_export(&exp_info);
9005 	if (IS_ERR(dmabuf)) {
9006 		ret = PTR_ERR(dmabuf);
9007 		goto free_pages;
9008 	}
9009
9010-	helper_buffer->dmabuf = dmabuf;
9011-	helper_buffer->priv_virt = cma_pages;
9012-
9013-	ret = dma_buf_fd(dmabuf, fd_flags);
9014-	if (ret < 0) {
9015-		dma_buf_put(dmabuf);
9016-		/* just return, as put will call release and that will free */
9017-		return ret;
9018-	}
9019-
9020-	return ret;
9021+	return dmabuf;
9022
9023 free_pages:
9024-	kfree(helper_buffer->pages);
9025+	kfree(buffer->pages);
9026 free_cma:
9027-	cma_release(cma_heap->cma, cma_pages, nr_pages);
9028-free_buf:
9029-	kfree(helper_buffer);
9030-	return ret;
9031+	cma_release(cma_heap->cma, cma_pages, pagecount);
9032+free_buffer:
9033+	kfree(buffer);
9034+
9035+	return ERR_PTR(ret);
9036 }
9037
9038 static const struct dma_heap_ops cma_heap_ops = {
9039diff -Naur a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c
9040--- a/drivers/dma-buf/heaps/deferred-free-helper.c	1970-01-01 08:00:00.000000000 +0800
9041+++ b/drivers/dma-buf/heaps/deferred-free-helper.c	2023-11-14 09:26:42.087243600 +0800
9042@@ -0,0 +1,138 @@
9043+// SPDX-License-Identifier: GPL-2.0
9044+/*
9045+ * Deferred dmabuf freeing helper
9046+ *
9047+ * Copyright (C) 2020 Linaro, Ltd.
9048+ *
9049+ * Based on the ION page pool code
9050+ * Copyright (C) 2011 Google, Inc.
9051+ */
9052+
9053+#include <linux/freezer.h>
9054+#include <linux/list.h>
9055+#include <linux/slab.h>
9056+#include <linux/swap.h>
9057+#include <linux/sched/signal.h>
9058+
9059+#include "deferred-free-helper.h"
9060+
9061+static LIST_HEAD(free_list);
9062+static size_t list_nr_pages;
9063+wait_queue_head_t freelist_waitqueue;
9064+struct task_struct *freelist_task;
9065+static DEFINE_SPINLOCK(free_list_lock);
9066+
9067+void deferred_free(struct deferred_freelist_item *item,
9068+		   void (*free)(struct deferred_freelist_item*,
9069+				enum df_reason),
9070+		   size_t nr_pages)
9071+{
9072+	unsigned long flags;
9073+
9074+	INIT_LIST_HEAD(&item->list);
9075+	item->nr_pages = nr_pages;
9076+	item->free = free;
9077+
9078+	spin_lock_irqsave(&free_list_lock, flags);
9079+	list_add(&item->list, &free_list);
9080+	list_nr_pages += nr_pages;
9081+	spin_unlock_irqrestore(&free_list_lock, flags);
9082+	wake_up(&freelist_waitqueue);
9083+}
9084+EXPORT_SYMBOL_GPL(deferred_free);
9085+
9086+static size_t free_one_item(enum df_reason reason)
9087+{
9088+	unsigned long flags;
9089+	size_t nr_pages;
9090+	struct deferred_freelist_item *item;
9091+
9092+	spin_lock_irqsave(&free_list_lock, flags);
9093+	if (list_empty(&free_list)) {
9094+		spin_unlock_irqrestore(&free_list_lock, flags);
9095+		return 0;
9096+	}
9097+	item = list_first_entry(&free_list, struct deferred_freelist_item, list);
9098+	list_del(&item->list);
9099+	nr_pages = item->nr_pages;
9100+	list_nr_pages -= nr_pages;
9101+	spin_unlock_irqrestore(&free_list_lock, flags);
9102+
9103+	item->free(item, reason);
9104+	return nr_pages;
9105+}
9106+
9107+static unsigned long get_freelist_nr_pages(void)
9108+{
9109+	unsigned long nr_pages;
9110+	unsigned long flags;
9111+
9112+	spin_lock_irqsave(&free_list_lock, flags);
9113+	nr_pages = list_nr_pages;
9114+	spin_unlock_irqrestore(&free_list_lock, flags);
9115+	return nr_pages;
9116+}
9117+
9118+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
9119+					   struct shrink_control *sc)
9120+{
9121+	return get_freelist_nr_pages();
9122+}
9123+
9124+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
9125+					  struct shrink_control *sc)
9126+{
9127+	unsigned long total_freed = 0;
9128+
9129+	if (sc->nr_to_scan == 0)
9130+		return 0;
9131+
9132+	while (total_freed < sc->nr_to_scan) {
9133+		size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
9134+
9135+		if (!pages_freed)
9136+			break;
9137+
9138+		total_freed += pages_freed;
9139+	}
9140+
9141+	return total_freed;
9142+}
9143+
9144+static struct shrinker freelist_shrinker = {
9145+	.count_objects = freelist_shrink_count,
9146+	.scan_objects = freelist_shrink_scan,
9147+	.seeks = DEFAULT_SEEKS,
9148+	.batch = 0,
9149+};
9150+
9151+static int deferred_free_thread(void *data)
9152+{
9153+	while (true) {
9154+		wait_event_freezable(freelist_waitqueue,
9155+				     get_freelist_nr_pages() > 0);
9156+
9157+		free_one_item(DF_NORMAL);
9158+	}
9159+
9160+	return 0;
9161+}
9162+
9163+static int deferred_freelist_init(void)
9164+{
9165+	list_nr_pages = 0;
9166+
9167+	init_waitqueue_head(&freelist_waitqueue);
9168+	freelist_task = kthread_run(deferred_free_thread, NULL,
9169+				    "%s", "dmabuf-deferred-free-worker");
9170+	if (IS_ERR(freelist_task)) {
9171+		pr_err("Creating thread for deferred free failed\n");
9172+		return -1;
9173+	}
9174+	sched_set_normal(freelist_task, 19);
9175+
9176+	return register_shrinker(&freelist_shrinker);
9177+}
9178+module_init(deferred_freelist_init);
9179+MODULE_LICENSE("GPL v2");
9180+
9181diff -Naur a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
9182--- a/drivers/dma-buf/heaps/deferred-free-helper.h	1970-01-01 08:00:00.000000000 +0800
9183+++ b/drivers/dma-buf/heaps/deferred-free-helper.h	2023-11-14 09:26:42.087243600 +0800
9184@@ -0,0 +1,55 @@
9185+/* SPDX-License-Identifier: GPL-2.0 */
9186+
9187+#ifndef DEFERRED_FREE_HELPER_H
9188+#define DEFERRED_FREE_HELPER_H
9189+
9190+/**
9191+ * df_reason - enum for reason why item was freed
9192+ *
9193+ * This provides a reason for why the free function was called
9194+ * on the item. This is useful when deferred_free is used in
9195+ * combination with a pagepool, so under pressure the page can
9196+ * be immediately freed.
9197+ *
9198+ * DF_NORMAL:         Normal deferred free
9199+ *
9200+ * DF_UNDER_PRESSURE: Free was called because the system
9201+ *                    is under memory pressure. Usually
9202+ *                    from a shrinker. Avoid allocating
9203+ *                    memory in the free call, as it may
9204+ *                    fail.
9205+ */
9206+enum df_reason {
9207+	DF_NORMAL,
9208+	DF_UNDER_PRESSURE,
9209+};
9210+
9211+/**
9212+ * deferred_freelist_item - item structure for deferred freelist
9213+ *
9214+ * This is to be added to the structure for whatever you want to
9215+ * defer freeing on.
9216+ *
9217+ * @nr_pages: number of pages used by item to be freed
9218+ * @free: function pointer to be called when freeing the item
9219+ * @list: list entry for the deferred list
9220+ */
9221+struct deferred_freelist_item {
9222+	size_t nr_pages;
9223+	void (*free)(struct deferred_freelist_item *i,
9224+		     enum df_reason reason);
9225+	struct list_head list;
9226+};
9227+
9228+/**
9229+ * deferred_free - call to add item to the deferred free list
9230+ *
9231+ * @item: Pointer to deferred_freelist_item field of a structure
9232+ * @free: Function pointer to the free call
9233+ * @nr_pages: number of pages to be freed
9234+ */
9235+void deferred_free(struct deferred_freelist_item *item,
9236+		   void (*free)(struct deferred_freelist_item *i,
9237+				enum df_reason reason),
9238+		   size_t nr_pages);
9239+#endif
9240diff -Naur a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
9241--- a/drivers/dma-buf/heaps/heap-helpers.c	2023-09-19 21:04:00.210684653 +0800
9242+++ b/drivers/dma-buf/heaps/heap-helpers.c	1970-01-01 08:00:00.000000000 +0800
9243@@ -1,271 +0,0 @@
9244-// SPDX-License-Identifier: GPL-2.0
9245-#include <linux/device.h>
9246-#include <linux/dma-buf.h>
9247-#include <linux/err.h>
9248-#include <linux/highmem.h>
9249-#include <linux/idr.h>
9250-#include <linux/list.h>
9251-#include <linux/slab.h>
9252-#include <linux/uaccess.h>
9253-#include <linux/vmalloc.h>
9254-#include <uapi/linux/dma-heap.h>
9255-
9256-#include "heap-helpers.h"
9257-
9258-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
9259-			     void (*free)(struct heap_helper_buffer *))
9260-{
9261-	buffer->priv_virt = NULL;
9262-	mutex_init(&buffer->lock);
9263-	buffer->vmap_cnt = 0;
9264-	buffer->vaddr = NULL;
9265-	buffer->pagecount = 0;
9266-	buffer->pages = NULL;
9267-	INIT_LIST_HEAD(&buffer->attachments);
9268-	buffer->free = free;
9269-}
9270-
9271-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
9272-					  int fd_flags)
9273-{
9274-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
9275-
9276-	exp_info.exp_name = dma_heap_get_name(buffer->heap);
9277-	exp_info.ops = &heap_helper_ops;
9278-	exp_info.size = buffer->size;
9279-	exp_info.flags = fd_flags;
9280-	exp_info.priv = buffer;
9281-
9282-	return dma_buf_export(&exp_info);
9283-}
9284-
9285-static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
9286-{
9287-	void *vaddr;
9288-
9289-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
9290-	if (!vaddr)
9291-		return ERR_PTR(-ENOMEM);
9292-
9293-	return vaddr;
9294-}
9295-
9296-static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
9297-{
9298-	if (buffer->vmap_cnt > 0) {
9299-		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
9300-		vunmap(buffer->vaddr);
9301-	}
9302-
9303-	buffer->free(buffer);
9304-}
9305-
9306-static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
9307-{
9308-	void *vaddr;
9309-
9310-	if (buffer->vmap_cnt) {
9311-		buffer->vmap_cnt++;
9312-		return buffer->vaddr;
9313-	}
9314-	vaddr = dma_heap_map_kernel(buffer);
9315-	if (IS_ERR(vaddr))
9316-		return vaddr;
9317-	buffer->vaddr = vaddr;
9318-	buffer->vmap_cnt++;
9319-	return vaddr;
9320-}
9321-
9322-static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
9323-{
9324-	if (!--buffer->vmap_cnt) {
9325-		vunmap(buffer->vaddr);
9326-		buffer->vaddr = NULL;
9327-	}
9328-}
9329-
9330-struct dma_heaps_attachment {
9331-	struct device *dev;
9332-	struct sg_table table;
9333-	struct list_head list;
9334-};
9335-
9336-static int dma_heap_attach(struct dma_buf *dmabuf,
9337-			   struct dma_buf_attachment *attachment)
9338-{
9339-	struct dma_heaps_attachment *a;
9340-	struct heap_helper_buffer *buffer = dmabuf->priv;
9341-	int ret;
9342-
9343-	a = kzalloc(sizeof(*a), GFP_KERNEL);
9344-	if (!a)
9345-		return -ENOMEM;
9346-
9347-	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
9348-					buffer->pagecount, 0,
9349-					buffer->pagecount << PAGE_SHIFT,
9350-					GFP_KERNEL);
9351-	if (ret) {
9352-		kfree(a);
9353-		return ret;
9354-	}
9355-
9356-	a->dev = attachment->dev;
9357-	INIT_LIST_HEAD(&a->list);
9358-
9359-	attachment->priv = a;
9360-
9361-	mutex_lock(&buffer->lock);
9362-	list_add(&a->list, &buffer->attachments);
9363-	mutex_unlock(&buffer->lock);
9364-
9365-	return 0;
9366-}
9367-
9368-static void dma_heap_detach(struct dma_buf *dmabuf,
9369-			    struct dma_buf_attachment *attachment)
9370-{
9371-	struct dma_heaps_attachment *a = attachment->priv;
9372-	struct heap_helper_buffer *buffer = dmabuf->priv;
9373-
9374-	mutex_lock(&buffer->lock);
9375-	list_del(&a->list);
9376-	mutex_unlock(&buffer->lock);
9377-
9378-	sg_free_table(&a->table);
9379-	kfree(a);
9380-}
9381-
9382-static
9383-struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
9384-				      enum dma_data_direction direction)
9385-{
9386-	struct dma_heaps_attachment *a = attachment->priv;
9387-	struct sg_table *table = &a->table;
9388-	int ret;
9389-
9390-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
9391-	if (ret)
9392-		table = ERR_PTR(ret);
9393-	return table;
9394-}
9395-
9396-static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
9397-				   struct sg_table *table,
9398-				   enum dma_data_direction direction)
9399-{
9400-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
9401-}
9402-
9403-static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
9404-{
9405-	struct vm_area_struct *vma = vmf->vma;
9406-	struct heap_helper_buffer *buffer = vma->vm_private_data;
9407-
9408-	if (vmf->pgoff > buffer->pagecount)
9409-		return VM_FAULT_SIGBUS;
9410-
9411-	vmf->page = buffer->pages[vmf->pgoff];
9412-	get_page(vmf->page);
9413-
9414-	return 0;
9415-}
9416-
9417-static const struct vm_operations_struct dma_heap_vm_ops = {
9418-	.fault = dma_heap_vm_fault,
9419-};
9420-
9421-static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
9422-{
9423-	struct heap_helper_buffer *buffer = dmabuf->priv;
9424-
9425-	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
9426-		return -EINVAL;
9427-
9428-	vma->vm_ops = &dma_heap_vm_ops;
9429-	vma->vm_private_data = buffer;
9430-
9431-	return 0;
9432-}
9433-
9434-static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
9435-{
9436-	struct heap_helper_buffer *buffer = dmabuf->priv;
9437-
9438-	dma_heap_buffer_destroy(buffer);
9439-}
9440-
9441-static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
9442-					     enum dma_data_direction direction)
9443-{
9444-	struct heap_helper_buffer *buffer = dmabuf->priv;
9445-	struct dma_heaps_attachment *a;
9446-	int ret = 0;
9447-
9448-	mutex_lock(&buffer->lock);
9449-
9450-	if (buffer->vmap_cnt)
9451-		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
9452-
9453-	list_for_each_entry(a, &buffer->attachments, list) {
9454-		dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
9455-				    direction);
9456-	}
9457-	mutex_unlock(&buffer->lock);
9458-
9459-	return ret;
9460-}
9461-
9462-static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
9463-					   enum dma_data_direction direction)
9464-{
9465-	struct heap_helper_buffer *buffer = dmabuf->priv;
9466-	struct dma_heaps_attachment *a;
9467-
9468-	mutex_lock(&buffer->lock);
9469-
9470-	if (buffer->vmap_cnt)
9471-		flush_kernel_vmap_range(buffer->vaddr, buffer->size);
9472-
9473-	list_for_each_entry(a, &buffer->attachments, list) {
9474-		dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
9475-				       direction);
9476-	}
9477-	mutex_unlock(&buffer->lock);
9478-
9479-	return 0;
9480-}
9481-
9482-static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
9483-{
9484-	struct heap_helper_buffer *buffer = dmabuf->priv;
9485-	void *vaddr;
9486-
9487-	mutex_lock(&buffer->lock);
9488-	vaddr = dma_heap_buffer_vmap_get(buffer);
9489-	mutex_unlock(&buffer->lock);
9490-
9491-	return vaddr;
9492-}
9493-
9494-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
9495-{
9496-	struct heap_helper_buffer *buffer = dmabuf->priv;
9497-
9498-	mutex_lock(&buffer->lock);
9499-	dma_heap_buffer_vmap_put(buffer);
9500-	mutex_unlock(&buffer->lock);
9501-}
9502-
9503-const struct dma_buf_ops heap_helper_ops = {
9504-	.map_dma_buf = dma_heap_map_dma_buf,
9505-	.unmap_dma_buf = dma_heap_unmap_dma_buf,
9506-	.mmap = dma_heap_mmap,
9507-	.release = dma_heap_dma_buf_release,
9508-	.attach = dma_heap_attach,
9509-	.detach = dma_heap_detach,
9510-	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
9511-	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
9512-	.vmap = dma_heap_dma_buf_vmap,
9513-	.vunmap = dma_heap_dma_buf_vunmap,
9514-};
9515diff -Naur a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
9516--- a/drivers/dma-buf/heaps/heap-helpers.h	2023-09-19 21:04:00.210684653 +0800
9517+++ b/drivers/dma-buf/heaps/heap-helpers.h	1970-01-01 08:00:00.000000000 +0800
9518@@ -1,53 +0,0 @@
9519-/* SPDX-License-Identifier: GPL-2.0 */
9520-/*
9521- * DMABUF Heaps helper code
9522- *
9523- * Copyright (C) 2011 Google, Inc.
9524- * Copyright (C) 2019 Linaro Ltd.
9525- */
9526-
9527-#ifndef _HEAP_HELPERS_H
9528-#define _HEAP_HELPERS_H
9529-
9530-#include <linux/dma-heap.h>
9531-#include <linux/list.h>
9532-
9533-/**
9534- * struct heap_helper_buffer - helper buffer metadata
9535- * @heap:		back pointer to the heap the buffer came from
9536- * @dmabuf:		backing dma-buf for this buffer
9537- * @size:		size of the buffer
9538- * @priv_virt		pointer to heap specific private value
9539- * @lock		mutext to protect the data in this structure
9540- * @vmap_cnt		count of vmap references on the buffer
9541- * @vaddr		vmap'ed virtual address
9542- * @pagecount		number of pages in the buffer
9543- * @pages		list of page pointers
9544- * @attachments		list of device attachments
9545- *
9546- * @free		heap callback to free the buffer
9547- */
9548-struct heap_helper_buffer {
9549-	struct dma_heap *heap;
9550-	struct dma_buf *dmabuf;
9551-	size_t size;
9552-
9553-	void *priv_virt;
9554-	struct mutex lock;
9555-	int vmap_cnt;
9556-	void *vaddr;
9557-	pgoff_t pagecount;
9558-	struct page **pages;
9559-	struct list_head attachments;
9560-
9561-	void (*free)(struct heap_helper_buffer *buffer);
9562-};
9563-
9564-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
9565-			     void (*free)(struct heap_helper_buffer *));
9566-
9567-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
9568-					  int fd_flags);
9569-
9570-extern const struct dma_buf_ops heap_helper_ops;
9571-#endif /* _HEAP_HELPERS_H */
9572diff -Naur a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
9573--- a/drivers/dma-buf/heaps/page_pool.c	1970-01-01 08:00:00.000000000 +0800
9574+++ b/drivers/dma-buf/heaps/page_pool.c	2023-11-14 09:26:42.087243600 +0800
9575@@ -0,0 +1,247 @@
9576+// SPDX-License-Identifier: GPL-2.0
9577+/*
9578+ * DMA BUF page pool system
9579+ *
9580+ * Copyright (C) 2020 Linaro Ltd.
9581+ *
9582+ * Based on the ION page pool code
9583+ * Copyright (C) 2011 Google, Inc.
9584+ */
9585+
9586+#include <linux/freezer.h>
9587+#include <linux/list.h>
9588+#include <linux/slab.h>
9589+#include <linux/swap.h>
9590+#include <linux/sched/signal.h>
9591+#include "page_pool.h"
9592+
9593+static LIST_HEAD(pool_list);
9594+static DEFINE_MUTEX(pool_list_lock);
9595+
9596+static inline
9597+struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
9598+{
9599+	if (fatal_signal_pending(current))
9600+		return NULL;
9601+	return alloc_pages(pool->gfp_mask, pool->order);
9602+}
9603+
9604+static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
9605+					       struct page *page)
9606+{
9607+	__free_pages(page, pool->order);
9608+}
9609+
9610+static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
9611+{
9612+	int index;
9613+
9614+	if (PageHighMem(page))
9615+		index = POOL_HIGHPAGE;
9616+	else
9617+		index = POOL_LOWPAGE;
9618+
9619+	mutex_lock(&pool->mutex);
9620+	list_add_tail(&page->lru, &pool->items[index]);
9621+	pool->count[index]++;
9622+	mutex_unlock(&pool->mutex);
9623+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
9624+			    1 << pool->order);
9625+}
9626+
9627+static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
9628+{
9629+	struct page *page;
9630+
9631+	mutex_lock(&pool->mutex);
9632+	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
9633+	if (page) {
9634+		pool->count[index]--;
9635+		list_del(&page->lru);
9636+		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
9637+				    -(1 << pool->order));
9638+	}
9639+	mutex_unlock(&pool->mutex);
9640+
9641+	return page;
9642+}
9643+
9644+static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
9645+{
9646+	struct page *page = NULL;
9647+
9648+	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
9649+	if (!page)
9650+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
9651+
9652+	return page;
9653+}
9654+
9655+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
9656+{
9657+	struct page *page = NULL;
9658+
9659+	if (WARN_ON(!pool))
9660+		return NULL;
9661+
9662+	page = dmabuf_page_pool_fetch(pool);
9663+
9664+	if (!page)
9665+		page = dmabuf_page_pool_alloc_pages(pool);
9666+	return page;
9667+}
9668+EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
9669+
9670+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
9671+{
9672+	if (WARN_ON(pool->order != compound_order(page)))
9673+		return;
9674+
9675+	dmabuf_page_pool_add(pool, page);
9676+}
9677+EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
9678+
9679+static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
9680+{
9681+	int count = pool->count[POOL_LOWPAGE];
9682+
9683+	if (high)
9684+		count += pool->count[POOL_HIGHPAGE];
9685+
9686+	return count << pool->order;
9687+}
9688+
9689+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
9690+{
9691+	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
9692+	int i;
9693+
9694+	if (!pool)
9695+		return NULL;
9696+
9697+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
9698+		pool->count[i] = 0;
9699+		INIT_LIST_HEAD(&pool->items[i]);
9700+	}
9701+	pool->gfp_mask = gfp_mask | __GFP_COMP;
9702+	pool->order = order;
9703+	mutex_init(&pool->mutex);
9704+
9705+	mutex_lock(&pool_list_lock);
9706+	list_add(&pool->list, &pool_list);
9707+	mutex_unlock(&pool_list_lock);
9708+
9709+	return pool;
9710+}
9711+EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
9712+
9713+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
9714+{
9715+	struct page *page;
9716+	int i;
9717+
9718+	/* Remove us from the pool list */
9719+	mutex_lock(&pool_list_lock);
9720+	list_del(&pool->list);
9721+	mutex_unlock(&pool_list_lock);
9722+
9723+	/* Free any remaining pages in the pool */
9724+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
9725+		while ((page = dmabuf_page_pool_remove(pool, i)))
9726+			dmabuf_page_pool_free_pages(pool, page);
9727+	}
9728+
9729+	kfree(pool);
9730+}
9731+EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
9732+
9733+static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
9734+				      int nr_to_scan)
9735+{
9736+	int freed = 0;
9737+	bool high;
9738+
9739+	if (current_is_kswapd())
9740+		high = true;
9741+	else
9742+		high = !!(gfp_mask & __GFP_HIGHMEM);
9743+
9744+	if (nr_to_scan == 0)
9745+		return dmabuf_page_pool_total(pool, high);
9746+
9747+	while (freed < nr_to_scan) {
9748+		struct page *page;
9749+
9750+		/* Try to free low pages first */
9751+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
9752+		if (!page)
9753+			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
9754+
9755+		if (!page)
9756+			break;
9757+
9758+		dmabuf_page_pool_free_pages(pool, page);
9759+		freed += (1 << pool->order);
9760+	}
9761+
9762+	return freed;
9763+}
9764+
9765+static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
9766+{
9767+	struct dmabuf_page_pool *pool;
9768+	int nr_total = 0;
9769+	int nr_freed;
9770+	int only_scan = 0;
9771+
9772+	if (!nr_to_scan)
9773+		only_scan = 1;
9774+
9775+	mutex_lock(&pool_list_lock);
9776+	list_for_each_entry(pool, &pool_list, list) {
9777+		if (only_scan) {
9778+			nr_total += dmabuf_page_pool_do_shrink(pool,
9779+							       gfp_mask,
9780+							       nr_to_scan);
9781+		} else {
9782+			nr_freed = dmabuf_page_pool_do_shrink(pool,
9783+							      gfp_mask,
9784+							      nr_to_scan);
9785+			nr_to_scan -= nr_freed;
9786+			nr_total += nr_freed;
9787+			if (nr_to_scan <= 0)
9788+				break;
9789+		}
9790+	}
9791+	mutex_unlock(&pool_list_lock);
9792+
9793+	return nr_total;
9794+}
9795+
9796+static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
9797+						   struct shrink_control *sc)
9798+{
9799+	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
9800+}
9801+
9802+static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
9803+						  struct shrink_control *sc)
9804+{
9805+	if (sc->nr_to_scan == 0)
9806+		return 0;
9807+	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
9808+}
9809+
9810+struct shrinker pool_shrinker = {
9811+	.count_objects = dmabuf_page_pool_shrink_count,
9812+	.scan_objects = dmabuf_page_pool_shrink_scan,
9813+	.seeks = DEFAULT_SEEKS,
9814+	.batch = 0,
9815+};
9816+
9817+static int dmabuf_page_pool_init_shrinker(void)
9818+{
9819+	return register_shrinker(&pool_shrinker);
9820+}
9821+module_init(dmabuf_page_pool_init_shrinker);
9822+MODULE_LICENSE("GPL v2");
9823diff -Naur a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
9824--- a/drivers/dma-buf/heaps/page_pool.h	1970-01-01 08:00:00.000000000 +0800
9825+++ b/drivers/dma-buf/heaps/page_pool.h	2023-11-14 09:26:42.087243600 +0800
9826@@ -0,0 +1,55 @@
9827+/* SPDX-License-Identifier: GPL-2.0 */
9828+/*
9829+ * DMA BUF PagePool implementation
9830+ * Based on earlier ION code by Google
9831+ *
9832+ * Copyright (C) 2011 Google, Inc.
9833+ * Copyright (C) 2020 Linaro Ltd.
9834+ */
9835+
9836+#ifndef _DMABUF_PAGE_POOL_H
9837+#define _DMABUF_PAGE_POOL_H
9838+
9839+#include <linux/device.h>
9840+#include <linux/kref.h>
9841+#include <linux/mm_types.h>
9842+#include <linux/mutex.h>
9843+#include <linux/shrinker.h>
9844+#include <linux/types.h>
9845+
9846+/* page types we track in the pool */
9847+enum {
9848+	POOL_LOWPAGE,      /* Clean lowmem pages */
9849+	POOL_HIGHPAGE,     /* Clean highmem pages */
9850+
9851+	POOL_TYPE_SIZE,
9852+};
9853+
9854+/**
9855+ * struct dmabuf_page_pool - pagepool struct
9856+ * @count[]:		array of number of pages of that type in the pool
9857+ * @items[]:		array of list of pages of the specific type
9858+ * @mutex:		lock protecting this struct and especially the count
9859+ *			item list
9860+ * @gfp_mask:		gfp_mask to use from alloc
9861+ * @order:		order of pages in the pool
9862+ * @list:		list node for list of pools
9863+ *
9864+ * Allows you to keep a pool of pre allocated pages to use
9865+ */
9866+struct dmabuf_page_pool {
9867+	int count[POOL_TYPE_SIZE];
9868+	struct list_head items[POOL_TYPE_SIZE];
9869+	struct mutex mutex;
9870+	gfp_t gfp_mask;
9871+	unsigned int order;
9872+	struct list_head list;
9873+};
9874+
9875+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
9876+						 unsigned int order);
9877+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool);
9878+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool);
9879+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page);
9880+
9881+#endif /* _DMABUF_PAGE_POOL_H */
9882diff -Naur a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
9883--- a/drivers/dma-buf/heaps/system_heap.c	2023-09-19 21:04:00.210684653 +0800
9884+++ b/drivers/dma-buf/heaps/system_heap.c	2023-11-14 09:26:42.091243200 +0800
9885@@ -3,7 +3,11 @@
9886  * DMABUF System heap exporter
9887  *
9888  * Copyright (C) 2011 Google, Inc.
9889- * Copyright (C) 2019 Linaro Ltd.
9890+ * Copyright (C) 2019, 2020 Linaro Ltd.
9891+ *
9892+ * Portions based off of Andrew Davis' SRAM heap:
9893+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
9894+ *	Andrew F. Davis <afd@ti.com>
9895  */
9896
9897 #include <linux/dma-buf.h>
9898@@ -15,99 +19,546 @@
9899 #include <linux/module.h>
9900 #include <linux/scatterlist.h>
9901 #include <linux/slab.h>
9902-#include <linux/sched/signal.h>
9903-#include <asm/page.h>
9904+#include <linux/vmalloc.h>
9905+
9906+#include "page_pool.h"
9907+#include "deferred-free-helper.h"
9908+
9909+static struct dma_heap *sys_heap;
9910+static struct dma_heap *sys_uncached_heap;
9911+
9912+struct system_heap_buffer {
9913+	struct dma_heap *heap;
9914+	struct list_head attachments;
9915+	struct mutex lock;
9916+	unsigned long len;
9917+	struct sg_table sg_table;
9918+	int vmap_cnt;
9919+	void *vaddr;
9920+	struct deferred_freelist_item deferred_free;
9921
9922-#include "heap-helpers.h"
9923+	bool uncached;
9924+};
9925+
9926+struct dma_heap_attachment {
9927+	struct device *dev;
9928+	struct sg_table *table;
9929+	struct list_head list;
9930+	bool mapped;
9931+
9932+	bool uncached;
9933+};
9934
9935-struct dma_heap *sys_heap;
9936+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
9937+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
9938+				| __GFP_COMP)
9939+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
9940+static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
9941+/*
9942+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
9943+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
9944+ * of order 0 pages can significantly improve the performance of many IOMMUs
9945+ * by reducing TLB pressure and time spent updating page tables.
9946+ */
9947+static const unsigned int orders[] = {8, 4, 0};
9948+#define NUM_ORDERS ARRAY_SIZE(orders)
9949+struct dmabuf_page_pool *pools[NUM_ORDERS];
9950
9951-static void system_heap_free(struct heap_helper_buffer *buffer)
9952+static struct sg_table *dup_sg_table(struct sg_table *table)
9953 {
9954-	pgoff_t pg;
9955+	struct sg_table *new_table;
9956+	int ret, i;
9957+	struct scatterlist *sg, *new_sg;
9958+
9959+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
9960+	if (!new_table)
9961+		return ERR_PTR(-ENOMEM);
9962+
9963+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
9964+	if (ret) {
9965+		kfree(new_table);
9966+		return ERR_PTR(-ENOMEM);
9967+	}
9968
9969-	for (pg = 0; pg < buffer->pagecount; pg++)
9970-		__free_page(buffer->pages[pg]);
9971-	kfree(buffer->pages);
9972-	kfree(buffer);
9973+	new_sg = new_table->sgl;
9974+	for_each_sgtable_sg(table, sg, i) {
9975+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
9976+		new_sg = sg_next(new_sg);
9977+	}
9978+
9979+	return new_table;
9980 }
9981
9982-static int system_heap_allocate(struct dma_heap *heap,
9983-				unsigned long len,
9984-				unsigned long fd_flags,
9985-				unsigned long heap_flags)
9986+static int system_heap_attach(struct dma_buf *dmabuf,
9987+			      struct dma_buf_attachment *attachment)
9988 {
9989-	struct heap_helper_buffer *helper_buffer;
9990-	struct dma_buf *dmabuf;
9991-	int ret = -ENOMEM;
9992-	pgoff_t pg;
9993+	struct system_heap_buffer *buffer = dmabuf->priv;
9994+	struct dma_heap_attachment *a;
9995+	struct sg_table *table;
9996+
9997+	a = kzalloc(sizeof(*a), GFP_KERNEL);
9998+	if (!a)
9999+		return -ENOMEM;
10000
10001-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
10002-	if (!helper_buffer)
10003+	table = dup_sg_table(&buffer->sg_table);
10004+	if (IS_ERR(table)) {
10005+		kfree(a);
10006 		return -ENOMEM;
10007+	}
10008+
10009+	a->table = table;
10010+	a->dev = attachment->dev;
10011+	INIT_LIST_HEAD(&a->list);
10012+	a->mapped = false;
10013+	a->uncached = buffer->uncached;
10014+	attachment->priv = a;
10015+
10016+	mutex_lock(&buffer->lock);
10017+	list_add(&a->list, &buffer->attachments);
10018+	mutex_unlock(&buffer->lock);
10019+
10020+	return 0;
10021+}
10022+
10023+static void system_heap_detach(struct dma_buf *dmabuf,
10024+			       struct dma_buf_attachment *attachment)
10025+{
10026+	struct system_heap_buffer *buffer = dmabuf->priv;
10027+	struct dma_heap_attachment *a = attachment->priv;
10028+
10029+	mutex_lock(&buffer->lock);
10030+	list_del(&a->list);
10031+	mutex_unlock(&buffer->lock);
10032+
10033+	sg_free_table(a->table);
10034+	kfree(a->table);
10035+	kfree(a);
10036+}
10037+
10038+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
10039+						enum dma_data_direction direction)
10040+{
10041+	struct dma_heap_attachment *a = attachment->priv;
10042+	struct sg_table *table = a->table;
10043+	int attr = 0;
10044+	int ret;
10045+
10046+	if (a->uncached)
10047+		attr = DMA_ATTR_SKIP_CPU_SYNC;
10048+
10049+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
10050+	if (ret)
10051+		return ERR_PTR(ret);
10052+
10053+	a->mapped = true;
10054+	return table;
10055+}
10056+
10057+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
10058+				      struct sg_table *table,
10059+				      enum dma_data_direction direction)
10060+{
10061+	struct dma_heap_attachment *a = attachment->priv;
10062+	int attr = 0;
10063+
10064+	if (a->uncached)
10065+		attr = DMA_ATTR_SKIP_CPU_SYNC;
10066+	a->mapped = false;
10067+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
10068+}
10069+
10070+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
10071+						enum dma_data_direction direction)
10072+{
10073+	struct system_heap_buffer *buffer = dmabuf->priv;
10074+	struct dma_heap_attachment *a;
10075+
10076+	mutex_lock(&buffer->lock);
10077+
10078+	if (buffer->vmap_cnt)
10079+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
10080+
10081+	if (!buffer->uncached) {
10082+		list_for_each_entry(a, &buffer->attachments, list) {
10083+			if (!a->mapped)
10084+				continue;
10085+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
10086+		}
10087+	}
10088+	mutex_unlock(&buffer->lock);
10089+
10090+	return 0;
10091+}
10092+
10093+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
10094+					      enum dma_data_direction direction)
10095+{
10096+	struct system_heap_buffer *buffer = dmabuf->priv;
10097+	struct dma_heap_attachment *a;
10098+
10099+	mutex_lock(&buffer->lock);
10100+
10101+	if (buffer->vmap_cnt)
10102+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
10103+
10104+	if (!buffer->uncached) {
10105+		list_for_each_entry(a, &buffer->attachments, list) {
10106+			if (!a->mapped)
10107+				continue;
10108+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
10109+		}
10110+	}
10111+	mutex_unlock(&buffer->lock);
10112+
10113+	return 0;
10114+}
10115+
10116+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
10117+{
10118+	struct system_heap_buffer *buffer = dmabuf->priv;
10119+	struct sg_table *table = &buffer->sg_table;
10120+	unsigned long addr = vma->vm_start;
10121+	struct sg_page_iter piter;
10122+	int ret;
10123+
10124+	if (buffer->uncached)
10125+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
10126+
10127+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
10128+		struct page *page = sg_page_iter_page(&piter);
10129+
10130+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
10131+				      vma->vm_page_prot);
10132+		if (ret)
10133+			return ret;
10134+		addr += PAGE_SIZE;
10135+		if (addr >= vma->vm_end)
10136+			return 0;
10137+	}
10138+	return 0;
10139+}
10140+
10141+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
10142+{
10143+	struct sg_table *table = &buffer->sg_table;
10144+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
10145+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
10146+	struct page **tmp = pages;
10147+	struct sg_page_iter piter;
10148+	pgprot_t pgprot = PAGE_KERNEL;
10149+	void *vaddr;
10150+
10151+	if (!pages)
10152+		return ERR_PTR(-ENOMEM);
10153+
10154+	if (buffer->uncached)
10155+		pgprot = pgprot_writecombine(PAGE_KERNEL);
10156+
10157+	for_each_sgtable_page(table, &piter, 0) {
10158+		WARN_ON(tmp - pages >= npages);
10159+		*tmp++ = sg_page_iter_page(&piter);
10160+	}
10161+
10162+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
10163+	vfree(pages);
10164+
10165+	if (!vaddr)
10166+		return ERR_PTR(-ENOMEM);
10167+
10168+	return vaddr;
10169+}
10170+
10171+static void *system_heap_vmap(struct dma_buf *dmabuf)
10172+{
10173+	struct system_heap_buffer *buffer = dmabuf->priv;
10174+	void *vaddr;
10175+
10176+	mutex_lock(&buffer->lock);
10177+	if (buffer->vmap_cnt) {
10178+		buffer->vmap_cnt++;
10179+		vaddr = buffer->vaddr;
10180+		goto out;
10181+	}
10182
10183-	init_heap_helper_buffer(helper_buffer, system_heap_free);
10184-	helper_buffer->heap = heap;
10185-	helper_buffer->size = len;
10186-
10187-	helper_buffer->pagecount = len / PAGE_SIZE;
10188-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
10189-					     sizeof(*helper_buffer->pages),
10190-					     GFP_KERNEL);
10191-	if (!helper_buffer->pages) {
10192-		ret = -ENOMEM;
10193-		goto err0;
10194+	vaddr = system_heap_do_vmap(buffer);
10195+	if (IS_ERR(vaddr))
10196+		goto out;
10197+
10198+	buffer->vaddr = vaddr;
10199+	buffer->vmap_cnt++;
10200+out:
10201+	mutex_unlock(&buffer->lock);
10202+
10203+	return vaddr;
10204+}
10205+
10206+static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
10207+{
10208+	struct system_heap_buffer *buffer = dmabuf->priv;
10209+
10210+	mutex_lock(&buffer->lock);
10211+	if (!--buffer->vmap_cnt) {
10212+		vunmap(buffer->vaddr);
10213+		buffer->vaddr = NULL;
10214+	}
10215+	mutex_unlock(&buffer->lock);
10216+}
10217+
10218+static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
10219+{
10220+	struct sg_table *sgt = &buffer->sg_table;
10221+	struct sg_page_iter piter;
10222+	struct page *p;
10223+	void *vaddr;
10224+	int ret = 0;
10225+
10226+	for_each_sgtable_page(sgt, &piter, 0) {
10227+		p = sg_page_iter_page(&piter);
10228+		vaddr = kmap_atomic(p);
10229+		memset(vaddr, 0, PAGE_SIZE);
10230+		kunmap_atomic(vaddr);
10231+	}
10232+
10233+	return ret;
10234+}
10235+
10236+static void system_heap_buf_free(struct deferred_freelist_item *item,
10237+				 enum df_reason reason)
10238+{
10239+	struct system_heap_buffer *buffer;
10240+	struct sg_table *table;
10241+	struct scatterlist *sg;
10242+	int i, j;
10243+
10244+	buffer = container_of(item, struct system_heap_buffer, deferred_free);
10245+	/* Zero the buffer pages before adding back to the pool */
10246+	if (reason == DF_NORMAL)
10247+		if (system_heap_zero_buffer(buffer))
10248+			reason = DF_UNDER_PRESSURE; // On failure, just free
10249+
10250+	table = &buffer->sg_table;
10251+	for_each_sg(table->sgl, sg, table->nents, i) {
10252+		struct page *page = sg_page(sg);
10253+
10254+		if (reason == DF_UNDER_PRESSURE) {
10255+			__free_pages(page, compound_order(page));
10256+		} else {
10257+			for (j = 0; j < NUM_ORDERS; j++) {
10258+				if (compound_order(page) == orders[j])
10259+					break;
10260+			}
10261+			dmabuf_page_pool_free(pools[j], page);
10262+		}
10263+	}
10264+	sg_free_table(table);
10265+	kfree(buffer);
10266+}
10267+
10268+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
10269+{
10270+	struct system_heap_buffer *buffer = dmabuf->priv;
10271+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
10272+
10273+	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
10274+}
10275+
10276+static const struct dma_buf_ops system_heap_buf_ops = {
10277+	.attach = system_heap_attach,
10278+	.detach = system_heap_detach,
10279+	.map_dma_buf = system_heap_map_dma_buf,
10280+	.unmap_dma_buf = system_heap_unmap_dma_buf,
10281+	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
10282+	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
10283+	.mmap = system_heap_mmap,
10284+	.vmap = system_heap_vmap,
10285+	.vunmap = system_heap_vunmap,
10286+	.release = system_heap_dma_buf_release,
10287+};
10288+
10289+static struct page *alloc_largest_available(unsigned long size,
10290+					    unsigned int max_order)
10291+{
10292+	struct page *page;
10293+	int i;
10294+
10295+	for (i = 0; i < NUM_ORDERS; i++) {
10296+		if (size <  (PAGE_SIZE << orders[i]))
10297+			continue;
10298+		if (max_order < orders[i])
10299+			continue;
10300+		page = dmabuf_page_pool_alloc(pools[i]);
10301+		if (!page)
10302+			continue;
10303+		return page;
10304 	}
10305+	return NULL;
10306+}
10307
10308-	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
10309+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
10310+					       unsigned long len,
10311+					       unsigned long fd_flags,
10312+					       unsigned long heap_flags,
10313+					       bool uncached)
10314+{
10315+	struct system_heap_buffer *buffer;
10316+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
10317+	unsigned long size_remaining = len;
10318+	unsigned int max_order = orders[0];
10319+	struct dma_buf *dmabuf;
10320+	struct sg_table *table;
10321+	struct scatterlist *sg;
10322+	struct list_head pages;
10323+	struct page *page, *tmp_page;
10324+	int i, ret = -ENOMEM;
10325+
10326+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
10327+	if (!buffer)
10328+		return ERR_PTR(-ENOMEM);
10329+
10330+	INIT_LIST_HEAD(&buffer->attachments);
10331+	mutex_init(&buffer->lock);
10332+	buffer->heap = heap;
10333+	buffer->len = len;
10334+	buffer->uncached = uncached;
10335+
10336+	INIT_LIST_HEAD(&pages);
10337+	i = 0;
10338+	while (size_remaining > 0) {
10339 		/*
10340 		 * Avoid trying to allocate memory if the process
10341-		 * has been killed by by SIGKILL
10342+		 * has been killed by SIGKILL
10343 		 */
10344 		if (fatal_signal_pending(current))
10345-			goto err1;
10346+			goto free_buffer;
10347
10348-		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
10349-		if (!helper_buffer->pages[pg])
10350-			goto err1;
10351+		page = alloc_largest_available(size_remaining, max_order);
10352+		if (!page)
10353+			goto free_buffer;
10354+
10355+		list_add_tail(&page->lru, &pages);
10356+		size_remaining -= page_size(page);
10357+		max_order = compound_order(page);
10358+		i++;
10359+	}
10360+
10361+	table = &buffer->sg_table;
10362+	if (sg_alloc_table(table, i, GFP_KERNEL))
10363+		goto free_buffer;
10364+
10365+	sg = table->sgl;
10366+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
10367+		sg_set_page(sg, page, page_size(page), 0);
10368+		sg = sg_next(sg);
10369+		list_del(&page->lru);
10370 	}
10371
10372 	/* create the dmabuf */
10373-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
10374+	exp_info.exp_name = dma_heap_get_name(heap);
10375+	exp_info.ops = &system_heap_buf_ops;
10376+	exp_info.size = buffer->len;
10377+	exp_info.flags = fd_flags;
10378+	exp_info.priv = buffer;
10379+	dmabuf = dma_buf_export(&exp_info);
10380 	if (IS_ERR(dmabuf)) {
10381 		ret = PTR_ERR(dmabuf);
10382-		goto err1;
10383+		goto free_pages;
10384+	}
10385+
10386+	/*
10387+	 * For uncached buffers, we need to initially flush cpu cache, since
10388+	 * the __GFP_ZERO on the allocation means the zeroing was done by the
10389+	 * cpu and thus it is likely cached. Map (and implicitly flush) and
10390+	 * unmap it now so we don't get corruption later on.
10391+	 */
10392+	if (buffer->uncached) {
10393+		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
10394+		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
10395 	}
10396
10397-	helper_buffer->dmabuf = dmabuf;
10398+	return dmabuf;
10399
10400-	ret = dma_buf_fd(dmabuf, fd_flags);
10401-	if (ret < 0) {
10402-		dma_buf_put(dmabuf);
10403-		/* just return, as put will call release and that will free */
10404-		return ret;
10405+free_pages:
10406+	for_each_sgtable_sg(table, sg, i) {
10407+		struct page *p = sg_page(sg);
10408+
10409+		__free_pages(p, compound_order(p));
10410 	}
10411+	sg_free_table(table);
10412+free_buffer:
10413+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
10414+		__free_pages(page, compound_order(page));
10415+	kfree(buffer);
10416
10417-	return ret;
10418+	return ERR_PTR(ret);
10419+}
10420
10421-err1:
10422-	while (pg > 0)
10423-		__free_page(helper_buffer->pages[--pg]);
10424-	kfree(helper_buffer->pages);
10425-err0:
10426-	kfree(helper_buffer);
10427+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
10428+					    unsigned long len,
10429+					    unsigned long fd_flags,
10430+					    unsigned long heap_flags)
10431+{
10432+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
10433+}
10434
10435-	return ret;
10436+static long system_get_pool_size(struct dma_heap *heap)
10437+{
10438+	int i;
10439+	long num_pages = 0;
10440+	struct dmabuf_page_pool **pool;
10441+
10442+	pool = pools;
10443+	for (i = 0; i < NUM_ORDERS; i++, pool++) {
10444+		num_pages += ((*pool)->count[POOL_LOWPAGE] +
10445+			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
10446+	}
10447+
10448+	return num_pages << PAGE_SHIFT;
10449 }
10450
10451 static const struct dma_heap_ops system_heap_ops = {
10452 	.allocate = system_heap_allocate,
10453+	.get_pool_size = system_get_pool_size,
10454+};
10455+
10456+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
10457+						     unsigned long len,
10458+						     unsigned long fd_flags,
10459+						     unsigned long heap_flags)
10460+{
10461+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
10462+}
10463+
10464+/* Dummy function to be used until we can call coerce_mask_and_coherent */
10465+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
10466+							    unsigned long len,
10467+							    unsigned long fd_flags,
10468+							    unsigned long heap_flags)
10469+{
10470+	return ERR_PTR(-EBUSY);
10471+}
10472+
10473+static struct dma_heap_ops system_uncached_heap_ops = {
10474+	/* After system_heap_create is complete, we will swap this */
10475+	.allocate = system_uncached_heap_not_initialized,
10476 };
10477
10478 static int system_heap_create(void)
10479 {
10480 	struct dma_heap_export_info exp_info;
10481-	int ret = 0;
10482+	int i;
10483+
10484+	for (i = 0; i < NUM_ORDERS; i++) {
10485+		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);
10486+
10487+		if (!pools[i]) {
10488+			int j;
10489+
10490+			pr_err("%s: page pool creation failed!\n", __func__);
10491+			for (j = 0; j < i; j++)
10492+				dmabuf_page_pool_destroy(pools[j]);
10493+			return -ENOMEM;
10494+		}
10495+	}
10496
10497 	exp_info.name = "system";
10498 	exp_info.ops = &system_heap_ops;
10499@@ -115,9 +566,21 @@
10500
10501 	sys_heap = dma_heap_add(&exp_info);
10502 	if (IS_ERR(sys_heap))
10503-		ret = PTR_ERR(sys_heap);
10504+		return PTR_ERR(sys_heap);
10505
10506-	return ret;
10507+	exp_info.name = "system-uncached";
10508+	exp_info.ops = &system_uncached_heap_ops;
10509+	exp_info.priv = NULL;
10510+
10511+	sys_uncached_heap = dma_heap_add(&exp_info);
10512+	if (IS_ERR(sys_uncached_heap))
10513+		return PTR_ERR(sys_uncached_heap);
10514+
10515+	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64));
10516+	mb(); /* make sure we only set allocate after dma_mask is set */
10517+	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
10518+
10519+	return 0;
10520 }
10521 module_init(system_heap_create);
10522 MODULE_LICENSE("GPL v2");
10523